@@ -36,15 +36,15 @@ If you wish to use pre-built docker images, you may use the images published in
3636<tr ><th >Component</th ><th >Image</th ></tr >
3737<tr >
3838 <td >Spark Driver Image</td >
39- <td ><code >kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 </code ></td >
39+ <td ><code >kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 </code ></td >
4040</tr >
4141<tr >
4242 <td >Spark Executor Image</td >
43- <td ><code >kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 </code ></td >
43+ <td ><code >kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 </code ></td >
4444</tr >
4545<tr >
4646 <td >Spark Initialization Image</td >
47- <td ><code >kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 </code ></td >
47+ <td ><code >kubespark/spark-init:v2.1.0-kubernetes-0.2.0 </code ></td >
4848</tr >
4949</table >
5050
@@ -76,9 +76,9 @@ are set up as described above:
7676 --kubernetes-namespace default \
7777 --conf spark.executor.instances=5 \
7878 --conf spark.app.name=spark-pi \
79- --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
80- --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
81- --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
79+ --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
80+ --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
81+ --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
8282 local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar
8383
8484The Spark master, specified either via passing the `--master` command line argument to `spark-submit` or by setting
@@ -125,9 +125,9 @@ and then you can compute the value of Pi as follows:
125125 --kubernetes-namespace default \
126126 --conf spark.executor.instances=5 \
127127 --conf spark.app.name=spark-pi \
128- --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
129- --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
130- --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
128+ --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
129+ --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
130+ --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
131131 --conf spark.kubernetes.resourceStagingServer.uri=http://<address-of-any-cluster-node>:31000 \
132132 examples/jars/spark_examples_2.11-2.2.0.jar
133133
@@ -168,9 +168,9 @@ If our local proxy were listening on port 8001, we would have our submission loo
168168 --kubernetes-namespace default \
169169 --conf spark.executor.instances=5 \
170170 --conf spark.app.name=spark-pi \
171- --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
172- --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
173- --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
171+ --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
172+ --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
173+ --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
174174 local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar
175175
176176Communication between Spark and Kubernetes clusters is performed using the fabric8 kubernetes-client library.
@@ -284,9 +284,9 @@ communicate with the resource staging server over TLS. The trustStore can be set
284284 --kubernetes-namespace default \
285285 --conf spark.executor.instances=5 \
286286 --conf spark.app.name=spark-pi \
287- --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
288- --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
289- --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
287+ --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
288+ --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
289+ --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
290290 --conf spark.kubernetes.resourceStagingServer.uri=https://<address-of-any-cluster-node>:31000 \
291291 --conf spark.ssl.kubernetes.resourceStagingServer.enabled=true \
292292 --conf spark.ssl.kubernetes.resourceStagingServer.clientCertPem=/home/myuser/cert.pem \