Skip to content
This repository was archived by the owner on Jan 9, 2020. It is now read-only.

Commit 7321a3e

Browse files
committed
Address comments
1 parent 3c4aff2 commit 7321a3e

File tree

3 files changed

+27
-25
lines changed

3 files changed

+27
-25
lines changed

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/kubernetes/Client.scala

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -64,17 +64,17 @@ private[spark] class Client(
6464
.getOrElse(Array.empty[String])
6565

6666
// Memory settings
67-
private val driverMemory = sparkConf.get("spark.driver.memory", "1g")
68-
private val driverMemoryBytes = Utils.byteStringAsBytes(driverMemory)
69-
private val driverSubmitServerMemory = sparkConf.get(KUBERNETES_DRIVER_SUBMIT_SERVER_MEMORY)
70-
private val driverSubmitServerMemoryBytes = Utils.byteStringAsBytes(driverSubmitServerMemoryBytes)
71-
private val driverContainerMemoryBytes = driverMemoryBytes + driverSubmitServerMemoryBytes
72-
private val memoryOverheadBytes = sparkConf
67+
private val driverMemoryMb = sparkConf.get(org.apache.spark.internal.config.DRIVER_MEMORY)
68+
private val driverSubmitServerMemoryMb = sparkConf.get(KUBERNETES_DRIVER_SUBMIT_SERVER_MEMORY)
69+
private val driverSubmitServerMemoryString = sparkConf.get(
70+
KUBERNETES_DRIVER_SUBMIT_SERVER_MEMORY.key,
71+
KUBERNETES_DRIVER_SUBMIT_SERVER_MEMORY.defaultValueString)
72+
private val driverContainerMemoryMb = driverMemoryMb + driverSubmitServerMemoryMb
73+
private val memoryOverheadMb = sparkConf
7374
.get(KUBERNETES_DRIVER_MEMORY_OVERHEAD)
74-
.map(overhead => Utils.byteStringAsBytes(overhead))
75-
.getOrElse(math.max((MEMORY_OVERHEAD_FACTOR * driverContainerMemoryBytes).toInt,
75+
.getOrElse(math.max((MEMORY_OVERHEAD_FACTOR * driverContainerMemoryMb).toInt,
7676
MEMORY_OVERHEAD_MIN))
77-
private val driverContainerMemoryWithOverhead = driverContainerMemoryBytes + memoryOverheadBytes
77+
private val driverContainerMemoryWithOverhead = driverContainerMemoryMb + memoryOverheadMb
7878

7979
private val waitForAppCompletion: Boolean = sparkConf.get(WAIT_FOR_APP_COMPLETION)
8080

@@ -387,10 +387,10 @@ private[spark] class Client(
387387
.withNewPort(SUBMISSION_SERVER_PORT_NAME)
388388
.build()
389389
val driverMemoryQuantity = new QuantityBuilder(false)
390-
.withAmount(driverContainerMemoryBytes.toString)
390+
.withAmount(s"${driverContainerMemoryMb}M")
391391
.build()
392392
val driverMemoryLimitQuantity = new QuantityBuilder(false)
393-
.withAmount(driverContainerMemoryWithOverhead.toString)
393+
.withAmount(s"${driverContainerMemoryWithOverhead}M")
394394
.build()
395395
kubernetesClient.pods().createNew()
396396
.withNewMetadata()
@@ -428,7 +428,7 @@ private[spark] class Client(
428428
// Note that SPARK_DRIVER_MEMORY only affects the REST server via spark-class.
429429
.addNewEnv()
430430
.withName(ENV_DRIVER_MEMORY)
431-
.withValue(driverSubmitServerMemory)
431+
.withValue(driverSubmitServerMemoryString)
432432
.endEnv()
433433
.addToEnv(sslConfiguration.sslPodEnvVars: _*)
434434
.withNewResources()

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/kubernetes/config.scala

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ import java.util.concurrent.TimeUnit
2020

2121
import org.apache.spark.{SPARK_VERSION => sparkVersion}
2222
import org.apache.spark.internal.config.ConfigBuilder
23+
import org.apache.spark.network.util.ByteUnit
2324

2425
package object config {
2526

@@ -103,7 +104,7 @@ package object config {
103104
| overheads, etc. This tends to grow with the executor size
104105
| (typically 6-10%).
105106
""".stripMargin)
106-
.stringConf
107+
.bytesConf(ByteUnit.MiB)
107108
.createOptional
108109

109110
private[spark] val KUBERNETES_DRIVER_MEMORY_OVERHEAD =
@@ -115,7 +116,7 @@ package object config {
115116
| interned strings, other native overheads, etc. This tends
116117
| to grow with the driver's memory size (typically 6-10%).
117118
""".stripMargin)
118-
.stringConf
119+
.bytesConf(ByteUnit.MiB)
119120
.createOptional
120121

121122
private[spark] val KUBERNETES_DRIVER_LABELS =
@@ -173,8 +174,8 @@ package object config {
173174
.doc("""
174175
| The amount of memory to allocate for the driver submission server.
175176
""".stripMargin)
176-
.stringConf
177-
.createWithDefault("256m")
177+
.bytesConf(ByteUnit.MiB)
178+
.createWithDefaultString("256m")
178179

179180
private[spark] val EXPOSE_KUBERNETES_DRIVER_SERVICE_UI_PORT =
180181
ConfigBuilder("spark.kubernetes.driver.service.exposeUiPort")

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/kubernetes/KubernetesClusterSchedulerBackend.scala

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -60,15 +60,16 @@ private[spark] class KubernetesClusterSchedulerBackend(
6060
.getOrElse(
6161
throw new SparkException("Must specify the driver pod name"))
6262

63-
private val executorMemory = conf.get("spark.executor.memory", "1g")
64-
private val executorMemoryBytes = Utils.byteStringAsBytes(executorMemory)
63+
private val executorMemoryMb = conf.get(org.apache.spark.internal.config.EXECUTOR_MEMORY)
64+
private val executorMemoryString = conf.get(
65+
org.apache.spark.internal.config.EXECUTOR_MEMORY.key,
66+
org.apache.spark.internal.config.EXECUTOR_MEMORY.defaultValueString)
6567

66-
private val memoryOverheadBytes = conf
68+
private val memoryOverheadMb = conf
6769
.get(KUBERNETES_EXECUTOR_MEMORY_OVERHEAD)
68-
.map(overhead => Utils.byteStringAsBytes(overhead))
69-
.getOrElse(math.max((MEMORY_OVERHEAD_FACTOR * executorMemoryBytes).toInt,
70+
.getOrElse(math.max((MEMORY_OVERHEAD_FACTOR * executorMemoryMb).toInt,
7071
MEMORY_OVERHEAD_MIN))
71-
private val executorMemoryWithOverhead = executorMemoryBytes + memoryOverheadBytes
72+
private val executorMemoryWithOverhead = executorMemoryMb + memoryOverheadMb
7273

7374
private val executorCores = conf.getOption("spark.executor.cores").getOrElse("1")
7475

@@ -165,10 +166,10 @@ private[spark] class KubernetesClusterSchedulerBackend(
165166
val selectors = Map(SPARK_EXECUTOR_ID_LABEL -> executorId,
166167
SPARK_APP_ID_LABEL -> applicationId()).asJava
167168
val executorMemoryQuantity = new QuantityBuilder(false)
168-
.withAmount(executorMemoryBytes.toString)
169+
.withAmount(s"${executorMemoryMb}M")
169170
.build()
170171
val executorMemoryLimitQuantity = new QuantityBuilder(false)
171-
.withAmount(executorMemoryWithOverhead.toString)
172+
.withAmount(s"${executorMemoryWithOverhead}M")
172173
.build()
173174
val executorCpuQuantity = new QuantityBuilder(false)
174175
.withAmount(executorCores)
@@ -177,7 +178,7 @@ private[spark] class KubernetesClusterSchedulerBackend(
177178
(ENV_EXECUTOR_PORT, executorPort.toString),
178179
(ENV_DRIVER_URL, driverUrl),
179180
(ENV_EXECUTOR_CORES, executorCores),
180-
(ENV_EXECUTOR_MEMORY, executorMemory),
181+
(ENV_EXECUTOR_MEMORY, executorMemoryString),
181182
(ENV_APPLICATION_ID, applicationId()),
182183
(ENV_EXECUTOR_ID, executorId)
183184
).map(env => new EnvVarBuilder()

0 commit comments

Comments (0)