@@ -24,16 +24,16 @@ private[spark] object Config extends Logging {
 
   val KUBERNETES_NAMESPACE =
     ConfigBuilder("spark.kubernetes.namespace")
-      .doc("The namespace that will be used for running the driver and executor pods. When using" +
-        "spark-submit in cluster mode, this can also be passed to spark-submit via the" +
-        "--kubernetes-namespace command line argument.")
+      .doc("The namespace that will be used for running the driver and executor pods. When using " +
+        "spark-submit in cluster mode, this can also be passed to spark-submit via the " +
+        "--kubernetes-namespace command line argument.")
       .stringConf
       .createWithDefault("default")
 
   val EXECUTOR_DOCKER_IMAGE =
     ConfigBuilder("spark.kubernetes.executor.docker.image")
-      .doc("Docker image to use for the executors. Specify this using the standard Docker tag" +
-        "format.")
+      .doc("Docker image to use for the executors. Specify this using the standard Docker tag " +
+        "format.")
       .stringConf
       .createOptional
 
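For context, a minimal sketch of setting the two keys above from application code via SparkConf; the namespace and image values below are illustrative placeholders, not part of this change:

    import org.apache.spark.SparkConf

    val conf = new SparkConf()
      .set("spark.kubernetes.namespace", "spark-jobs")  // falls back to "default" if unset
      .set("spark.kubernetes.executor.docker.image",
        "registry.example.com/spark-executor:latest")   // optional; no default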
@@ -56,10 +56,10 @@ private[spark] object Config extends Logging {
 
   val KUBERNETES_SERVICE_ACCOUNT_NAME =
     ConfigBuilder(s"$APISERVER_AUTH_DRIVER_CONF_PREFIX.serviceAccountName")
-      .doc("Service account that is used when running the driver pod. The driver pod uses" +
-        "this service account when requesting executor pods from the API server. If specific" +
-        "credentials are given for the driver pod to use, the driver will favor" +
-        "using those credentials instead.")
+      .doc("Service account that is used when running the driver pod. The driver pod uses " +
+        "this service account when requesting executor pods from the API server. If specific " +
+        "credentials are given for the driver pod to use, the driver will favor " +
+        "using those credentials instead.")
       .stringConf
       .createOptional
 
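The full key here is built from APISERVER_AUTH_DRIVER_CONF_PREFIX, which is defined outside this diff; assuming it resolves to "spark.kubernetes.authenticate.driver", a sketch of setting it (the account name is a placeholder):

    val conf = new SparkConf()
      .set("spark.kubernetes.authenticate.driver.serviceAccountName", "spark-driver-sa")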
@@ -68,9 +68,9 @@ private[spark] object Config extends Logging {
   // based on the executor memory.
   val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
     ConfigBuilder("spark.kubernetes.executor.memoryOverhead")
-      .doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This" +
-        "is memory that accounts for things like VM overheads, interned strings, other native" +
-        "overheads, etc. This tends to grow with the executor size. (typically 6-10%).")
+      .doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This " +
+        "is memory that accounts for things like VM overheads, interned strings, other native " +
+        "overheads, etc. This tends to grow with the executor size. (typically 6-10%).")
       .bytesConf(ByteUnit.MiB)
       .createOptional
 
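Since the value is read in MiB and the doc suggests 6-10% of executor size, a worked sketch of choosing an overhead (the executor size and percentage below are illustrative):

    val executorMemoryMiB = 4 * 1024                    // 4 GiB executor
    val overheadMiB = (executorMemoryMiB * 0.10).toInt  // 10% of 4096 MiB = 409 MiB
    val conf = new SparkConf()
      .set("spark.kubernetes.executor.memoryOverhead", s"${overheadMiB}m")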
@@ -117,7 +117,7 @@ private[spark] object Config extends Logging {
       .intConf
       .checkValue(value => value > 0, "Maximum attempts of checks of executor lost reason " +
         "must be a positive integer")
-      .createWithDefault(5)
+      .createWithDefault(10)
 
   val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
 }
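The hunk above raises the default retry budget for executor-lost-reason checks from 5 to 10. The config key itself sits above this hunk and is not visible here, so the sketch below uses a hypothetical key purely to show the same builder-plus-validation pattern:

    private[spark] val MAX_ATTEMPTS_EXAMPLE =
      ConfigBuilder("spark.kubernetes.example.maxAttempts")  // hypothetical key
        .intConf
        .checkValue(value => value > 0, "must be a positive integer")
        .createWithDefault(10)  // mirrors the new default; values <= 0 fail validation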