From d37aef412407518c6ff7b4dd799c9b18f58657fa Mon Sep 17 00:00:00 2001
From: foxish
Date: Wed, 5 Apr 2017 14:25:13 -0700
Subject: [PATCH] Updating images in doc

---
 docs/running-on-kubernetes.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/running-on-kubernetes.md b/docs/running-on-kubernetes.md
index 15c44873fd98b..3a352919a9794 100644
--- a/docs/running-on-kubernetes.md
+++ b/docs/running-on-kubernetes.md
@@ -24,11 +24,11 @@ If you wish to use pre-built docker images, you may use the images published in
 <table class="table">
 <tr><th>Component</th><th>Image</th></tr>
 <tr>
   <td>Spark Driver Image</td>
-  <td><code>kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-rc1</code></td>
+  <td><code>kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2</code></td>
 </tr>
 <tr>
   <td>Spark Executor Image</td>
-  <td><code>kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-rc1</code></td>
+  <td><code>kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2</code></td>
 </tr>
 </table>
@@ -44,7 +44,7 @@ For example, if the registry host is `registry-host` and the registry is listeni
     docker build -t registry-host:5000/spark-executor:latest -f dockerfiles/executor/Dockerfile .
     docker push registry-host:5000/spark-driver:latest
     docker push registry-host:5000/spark-executor:latest
- 
+
 ## Submitting Applications to Kubernetes

 Kubernetes applications can be executed via `spark-submit`. For example, to compute the value of pi, assuming the images
@@ -57,8 +57,8 @@ are set up as described above:
       --kubernetes-namespace default \
       --conf spark.executor.instances=5 \
       --conf spark.app.name=spark-pi \
-      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-rc1 \
-      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-rc1 \
+      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
+      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
       examples/jars/spark_examples_2.11-2.2.0.jar

 The Spark master, specified either via passing the `--master` command line argument to `spark-submit` or by setting
@@ -78,7 +78,7 @@ In the above example, the specific Kubernetes cluster can be used with spark sub

 Note that applications can currently only be executed in cluster mode, where the driver and its executors are running on
 the cluster.
- 
+
 ### Specifying input files

 Spark supports specifying JAR paths that are either on the submitting host's disk, or are located on the disk of the
@@ -108,8 +108,8 @@ If our local proxy were listening on port 8001, we would have our submission loo
       --kubernetes-namespace default \
       --conf spark.executor.instances=5 \
       --conf spark.app.name=spark-pi \
-      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-rc1 \
-      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-rc1 \
+      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
+      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
       examples/jars/spark_examples_2.11-2.2.0.jar

 Communication between Spark and Kubernetes clusters is performed using the fabric8 kubernetes-client library.
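
To sanity-check this change, the two retagged images can be pulled directly from Docker Hub. This is a minimal sketch: the repository and tag names are taken from the diff above, and `docker pull` assumes a Docker-enabled host.

    # Pull the images referenced by the updated docs to confirm the tags exist
    docker pull kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2
    docker pull kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2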
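The "local proxy" referenced in the second `spark-submit` example can be started with `kubectl proxy`, which serves the API server on 127.0.0.1:8001 by default. A sketch, assuming `kubectl` is already configured against the target cluster:

    # Open an authenticated local proxy to the Kubernetes API server
    kubectl proxy
    # Starting to serve on 127.0.0.1:8001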