Changes from all commits (17 commits)
docs/running-on-yarn.md (4 changes: 2 additions & 2 deletions)
@@ -83,14 +83,14 @@ Most of the configs are the same for Spark on YARN as for other deployment modes
</tr>
<tr>
<td><code>spark.yarn.executor.memoryOverhead</code></td>
-  <td>384</td>
+  <td>executorMemory * 0.06, with minimum of 384</td>
<td>
The amount of off-heap memory (in megabytes) to be allocated per executor. This memory accounts for things like VM overheads, interned strings, and other native overheads.
</td>
</tr>
<tr>
<td><code>spark.yarn.driver.memoryOverhead</code></td>
-  <td>384</td>
+  <td>driverMemory * 0.06, with minimum of 384</td>
<td>
The amount of off-heap memory (in megabytes) to be allocated per driver. This memory accounts for things like VM overheads, interned strings, and other native overheads.
</td>
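Taken together, the two rows above say the overhead default is now proportional to the container's own memory rather than a flat 384 MB. A minimal sketch of that default calculation, assuming memory sizes in megabytes and using the 0.06 factor and 384 MB floor introduced by this change:

object MemoryOverheadSketch {
  // Values introduced by this change: 6% of the container memory, never below 384 MB.
  val MemoryOverheadFactor = 0.06
  val MemoryOverheadMin = 384

  // Default overhead (in MB) used when spark.yarn.executor.memoryOverhead
  // (or spark.yarn.driver.memoryOverhead) is not set explicitly.
  def defaultOverhead(containerMemoryMb: Int): Int =
    math.max((MemoryOverheadFactor * containerMemoryMb).toInt, MemoryOverheadMin)

  def main(args: Array[String]): Unit = {
    println(defaultOverhead(2048))  // small container: the 384 MB floor wins -> 384
    println(defaultOverhead(20480)) // 20 GB container: 6% wins -> 1228
  }
}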
@@ -100,7 +100,8 @@ class ExecutorLauncher(args: ApplicationMasterArguments, conf: Configuration, sp

if (minimumMemory > 0) {
val mem = args.executorMemory + sparkConf.getInt("spark.yarn.executor.memoryOverhead",
-  YarnAllocationHandler.MEMORY_OVERHEAD)
+  math.max((YarnAllocationHandler.MEMORY_OVERHEAD_FACTOR * args.executorMemory).toInt,
+    YarnAllocationHandler.MEMORY_OVERHEAD_MIN))
val numCore = (mem / minimumMemory) + (if (0 != (mem % minimumMemory)) 1 else 0)

if (numCore > 0) {
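For reference, the numCore expression in the hunk above is a ceiling division: it counts how many multiples of YARN's minimum allocation are needed to cover heap plus overhead. A small sketch with hypothetical numbers (the 1024 MB minimum allocation is only an example):

object ChunkSketch {
  // Ceiling division, as written in the hunk above: how many multiples of
  // YARN's minimum allocation are needed to cover heap plus overhead.
  def chunksNeeded(mem: Int, minimumMemory: Int): Int =
    (mem / minimumMemory) + (if (mem % minimumMemory != 0) 1 else 0)

  def main(args: Array[String]): Unit = {
    // Hypothetical: 2048 MB heap + 384 MB overhead against a 1024 MB minimum -> 3.
    println(chunksNeeded(2048 + 384, 1024))
  }
}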
@@ -90,7 +90,8 @@ private[yarn] class YarnAllocationHandler(

// Additional memory overhead - in mb.
private def memoryOverhead: Int = sparkConf.getInt("spark.yarn.executor.memoryOverhead",
-  YarnAllocationHandler.MEMORY_OVERHEAD)
+  math.max((YarnAllocationHandler.MEMORY_OVERHEAD_FACTOR * executorMemory).toInt,
+    YarnAllocationHandler.MEMORY_OVERHEAD_MIN))

private val numExecutorsRunning = new AtomicInteger()
// Used to generate a unique id per executor
@@ -548,8 +549,9 @@ object YarnAllocationHandler {
// request types (like map/reduce in hadoop for example)
val PRIORITY = 1

-  // Additional memory overhead - in mb
-  val MEMORY_OVERHEAD = 384
+  // Additional memory overhead
+  val MEMORY_OVERHEAD_FACTOR = 0.06
+  val MEMORY_OVERHEAD_MIN = 384

// Host to rack map - saved from allocation requests
// We are expecting this not to change.
@@ -67,7 +67,8 @@ trait ClientBase extends Logging {

// Additional memory overhead - in mb.
protected def memoryOverhead: Int = sparkConf.getInt("spark.yarn.driver.memoryOverhead",
-  YarnAllocationHandler.MEMORY_OVERHEAD)
+  math.max((YarnAllocationHandler.MEMORY_OVERHEAD_FACTOR * args.amMemory).toInt,
+    YarnAllocationHandler.MEMORY_OVERHEAD_MIN))

// TODO(harvey): This could just go in ClientArguments.
def validateArgs() = {
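In both the driver and executor paths the computed max(...) value is only the fallback passed to sparkConf.getInt, so an explicit setting still takes precedence. A usage sketch (not part of the patch) for pinning the overheads to fixed values:

import org.apache.spark.SparkConf

object OverheadOverrideSketch {
  // Explicit settings take precedence over the factor-based defaults computed above.
  val conf = new SparkConf()
    .set("spark.yarn.driver.memoryOverhead", "512")    // MB for the driver / application master
    .set("spark.yarn.executor.memoryOverhead", "1024") // MB per executor
}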
@@ -92,7 +92,8 @@ private[yarn] class YarnAllocationHandler(

// Additional memory overhead - in mb.
private def memoryOverhead: Int = sparkConf.getInt("spark.yarn.executor.memoryOverhead",
-  YarnAllocationHandler.MEMORY_OVERHEAD)
+  math.max((YarnAllocationHandler.MEMORY_OVERHEAD_FACTOR * executorMemory).toInt,
+    YarnAllocationHandler.MEMORY_OVERHEAD_MIN))

// Number of container requests that have been sent to, but not yet allocated by the
// ApplicationMaster.
@@ -562,8 +563,9 @@ object YarnAllocationHandler {
// request types (like map/reduce in hadoop for example)
val PRIORITY = 1

-  // Additional memory overhead - in mb.
-  val MEMORY_OVERHEAD = 384
+  // Additional memory overhead
+  val MEMORY_OVERHEAD_FACTOR = 0.06
+  val MEMORY_OVERHEAD_MIN = 384

// Host to rack map - saved from allocation requests. We are expecting this not to change.
// Note that it is possible for this to change: the ResourceManager will indicate that to us via