 */
 package org.apache.spark.deploy.k8s

-import org.apache.spark.{SPARK_VERSION => sparkVersion}
+import org.apache.spark.SPARK_VERSION
 import org.apache.spark.internal.Logging
 import org.apache.spark.internal.config.ConfigBuilder
 import org.apache.spark.network.util.ByteUnit

-package object config extends Logging {
+private[spark] object config extends Logging {

-  private[spark] val KUBERNETES_NAMESPACE =
+  val KUBERNETES_NAMESPACE =
     ConfigBuilder("spark.kubernetes.namespace")
       .doc("The namespace that will be used for running the driver and executor pods. When using" +
         " spark-submit in cluster mode, this can also be passed to spark-submit via the" +
         " --kubernetes-namespace command line argument.")
       .stringConf
       .createWithDefault("default")

-  private[spark] val EXECUTOR_DOCKER_IMAGE =
+  val EXECUTOR_DOCKER_IMAGE =
     ConfigBuilder("spark.kubernetes.executor.docker.image")
       .doc("Docker image to use for the executors. Specify this using the standard Docker tag" +
         " format.")
       .stringConf
-      .createWithDefault(s"spark-executor:$sparkVersion")
+      .createWithDefault(s"spark-executor:$SPARK_VERSION")

-  private[spark] val DOCKER_IMAGE_PULL_POLICY =
+  val DOCKER_IMAGE_PULL_POLICY =
     ConfigBuilder("spark.kubernetes.docker.image.pullPolicy")
       .doc("Docker image pull policy when pulling any docker image in Kubernetes integration")
       .stringConf
       .createWithDefault("IfNotPresent")

-  private[spark] val APISERVER_AUTH_DRIVER_CONF_PREFIX =
+  val APISERVER_AUTH_DRIVER_CONF_PREFIX =
     "spark.kubernetes.authenticate.driver"
-  private[spark] val APISERVER_AUTH_DRIVER_MOUNTED_CONF_PREFIX =
+  val APISERVER_AUTH_DRIVER_MOUNTED_CONF_PREFIX =
     "spark.kubernetes.authenticate.driver.mounted"
-  private[spark] val OAUTH_TOKEN_CONF_SUFFIX = "oauthToken"
-  private[spark] val OAUTH_TOKEN_FILE_CONF_SUFFIX = "oauthTokenFile"
-  private[spark] val CLIENT_KEY_FILE_CONF_SUFFIX = "clientKeyFile"
-  private[spark] val CLIENT_CERT_FILE_CONF_SUFFIX = "clientCertFile"
-  private[spark] val CA_CERT_FILE_CONF_SUFFIX = "caCertFile"
+  val OAUTH_TOKEN_CONF_SUFFIX = "oauthToken"
+  val OAUTH_TOKEN_FILE_CONF_SUFFIX = "oauthTokenFile"
+  val CLIENT_KEY_FILE_CONF_SUFFIX = "clientKeyFile"
+  val CLIENT_CERT_FILE_CONF_SUFFIX = "clientCertFile"
+  val CA_CERT_FILE_CONF_SUFFIX = "caCertFile"

-  private[spark] val KUBERNETES_SERVICE_ACCOUNT_NAME =
+  val KUBERNETES_SERVICE_ACCOUNT_NAME =
     ConfigBuilder(s"$APISERVER_AUTH_DRIVER_CONF_PREFIX.serviceAccountName")
       .doc("Service account that is used when running the driver pod. The driver pod uses" +
         " this service account when requesting executor pods from the API server. If specific" +
@@ -66,49 +66,49 @@ package object config extends Logging {
   // Note that while we set a default for this when we start up the
   // scheduler, the specific default value is dynamically determined
   // based on the executor memory.
-  private[spark] val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
+  val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
     ConfigBuilder("spark.kubernetes.executor.memoryOverhead")
       .doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This" +
         " is memory that accounts for things like VM overheads, interned strings, other native" +
         " overheads, etc. This tends to grow with the executor size. (typically 6-10%).")
       .bytesConf(ByteUnit.MiB)
       .createOptional

-  private[spark] val KUBERNETES_EXECUTOR_LABEL_PREFIX = "spark.kubernetes.executor.label."
-  private[spark] val KUBERNETES_EXECUTOR_ANNOTATION_PREFIX = "spark.kubernetes.executor.annotation."
+  val KUBERNETES_EXECUTOR_LABEL_PREFIX = "spark.kubernetes.executor.label."
+  val KUBERNETES_EXECUTOR_ANNOTATION_PREFIX = "spark.kubernetes.executor.annotation."

-  private[spark] val KUBERNETES_DRIVER_POD_NAME =
+  val KUBERNETES_DRIVER_POD_NAME =
     ConfigBuilder("spark.kubernetes.driver.pod.name")
       .doc("Name of the driver pod.")
       .stringConf
       .createOptional

-  private[spark] val KUBERNETES_EXECUTOR_POD_NAME_PREFIX =
+  val KUBERNETES_EXECUTOR_POD_NAME_PREFIX =
     ConfigBuilder("spark.kubernetes.executor.podNamePrefix")
       .doc("Prefix to use in front of the executor pod names.")
       .internal()
       .stringConf
       .createWithDefault("spark")

-  private[spark] val KUBERNETES_ALLOCATION_BATCH_SIZE =
+  val KUBERNETES_ALLOCATION_BATCH_SIZE =
     ConfigBuilder("spark.kubernetes.allocation.batch.size")
       .doc("Number of pods to launch at once in each round of executor allocation.")
       .intConf
       .checkValue(value => value > 0, "Allocation batch size should be a positive integer")
       .createWithDefault(5)

-  private[spark] val KUBERNETES_ALLOCATION_BATCH_DELAY =
+  val KUBERNETES_ALLOCATION_BATCH_DELAY =
     ConfigBuilder("spark.kubernetes.allocation.batch.delay")
       .doc("Number of seconds to wait between each round of executor allocation.")
       .longConf
       .checkValue(value => value > 0, s"Allocation batch delay should be a positive integer")
       .createWithDefault(1)

-  private[spark] val KUBERNETES_EXECUTOR_LIMIT_CORES =
+  val KUBERNETES_EXECUTOR_LIMIT_CORES =
     ConfigBuilder("spark.kubernetes.executor.limit.cores")
       .doc("Specify the hard cpu limit for a single executor pod")
       .stringConf
       .createOptional

-  private[spark] val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
+  val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
 }
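For context on how these entries are consumed: a `ConfigEntry` is read back through `SparkConf`, with the declared default applied when the key is unset and `createOptional` entries surfacing as `Option`. A minimal sketch follows, assuming code living inside the Spark tree (the `SparkConf.get(entry)` accessor is `private[spark]`); the object name `ConfigReadSketch` and the value "analytics" are illustrative, not from this commit.

package org.apache.spark.deploy.k8s

import org.apache.spark.SparkConf

private[spark] object ConfigReadSketch {
  def main(args: Array[String]): Unit = {
    // loadDefaults = false: ignore system properties for a predictable demo.
    val conf = new SparkConf(false)
      .set("spark.kubernetes.namespace", "analytics")  // illustrative value

    // An explicitly-set key returns the set value.
    println(conf.get(config.KUBERNETES_NAMESPACE))        // analytics
    // An unset key falls back to the declared default.
    println(conf.get(config.DOCKER_IMAGE_PULL_POLICY))    // IfNotPresent
    // createOptional entries come back as Option.
    println(conf.get(config.KUBERNETES_DRIVER_POD_NAME))  // None
  }
}

Centralizing the keys as typed `ConfigEntry` values, rather than scattering raw string lookups, keeps defaults and validation (such as the positive-integer checks on the allocation batch settings) in one place.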