From aafc078f1a42f7a1081a9e930dc882fd5b530444 Mon Sep 17 00:00:00 2001
From: foxish
Date: Mon, 5 Jun 2017 17:29:04 -0700
Subject: [PATCH 1/2] Update tags

---
 docs/running-on-kubernetes.md | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/docs/running-on-kubernetes.md b/docs/running-on-kubernetes.md
index a88b0d380fac..36b45526dfb4 100644
--- a/docs/running-on-kubernetes.md
+++ b/docs/running-on-kubernetes.md
@@ -36,15 +36,15 @@ If you wish to use pre-built docker images, you may use the images published in
 <table class="table">
 <tr><th>Component</th><th>Image</th></tr>
 <tr>
   <td>Spark Driver Image</td>
-  <td><code>kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2</code></td>
+  <td><code>kubespark/spark-driver:v2.1.0-kubernetes-0.2.0</code></td>
 </tr>
 <tr>
   <td>Spark Executor Image</td>
-  <td><code>kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2</code></td>
+  <td><code>kubespark/spark-executor:v2.1.0-kubernetes-0.2.0</code></td>
 </tr>
 <tr>
   <td>Spark Initialization Image</td>
-  <td><code>kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2</code></td>
+  <td><code>kubespark/spark-init:v2.1.0-kubernetes-0.2.0</code></td>
 </tr>
 </table>
@@ -76,9 +76,9 @@ are set up as described above:
       --kubernetes-namespace default \
       --conf spark.executor.instances=5 \
       --conf spark.app.name=spark-pi \
-      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
-      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
-      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
+      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
       local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar
 
 The Spark master, specified either via passing the `--master` command line argument to `spark-submit` or by setting
@@ -125,9 +125,9 @@ and then you can compute the value of Pi as follows:
       --kubernetes-namespace default \
       --conf spark.executor.instances=5 \
       --conf spark.app.name=spark-pi \
-      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
-      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
-      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
+      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
       --conf spark.kubernetes.resourceStagingServer.uri=http://<address-of-any-cluster-node>:31000 \
       examples/jars/spark_examples_2.11-2.2.0.jar
 
@@ -168,9 +168,9 @@ If our local proxy were listening on port 8001, we would have our submission loo
       --kubernetes-namespace default \
       --conf spark.executor.instances=5 \
      --conf spark.app.name=spark-pi \
-      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
-      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
-      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
+      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
       local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar
 
 Communication between Spark and Kubernetes clusters is performed using the fabric8 kubernetes-client library.
@@ -284,9 +284,9 @@ communicate with the resource staging server over TLS. The trustStore can be set
       --kubernetes-namespace default \
       --conf spark.executor.instances=5 \
       --conf spark.app.name=spark-pi \
-      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
-      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
-      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
+      --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
+      --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
       --conf spark.kubernetes.resourceStagingServer.uri=https://<address-of-any-cluster-node>:31000 \
       --conf spark.ssl.kubernetes.resourceStagingServer.enabled=true \
      --conf spark.ssl.kubernetes.resourceStagingServer.clientCertPem=/home/myuser/cert.pem \
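Before relying on the retagged spark-submit commands above, it can be worth confirming that all three 0.2.0 images are actually published. A minimal check, assuming Docker is installed locally and the kubespark repositories on Docker Hub are public:

    docker pull kubespark/spark-driver:v2.1.0-kubernetes-0.2.0
    docker pull kubespark/spark-executor:v2.1.0-kubernetes-0.2.0
    docker pull kubespark/spark-init:v2.1.0-kubernetes-0.2.0

If a tag is missing, the error surfaces only after submission, with the driver pod stuck in ErrImagePull/ImagePullBackOff rather than spark-submit failing outright.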
From b5c7ec137b5619958bbec01bf714a28131b3b336 Mon Sep 17 00:00:00 2001
From: foxish
Date: Mon, 5 Jun 2017 17:31:15 -0700
Subject: [PATCH 2/2] update tags in conf directory

---
 conf/kubernetes-resource-staging-server.yaml | 2 +-
 conf/kubernetes-shuffle-service.yaml         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/conf/kubernetes-resource-staging-server.yaml b/conf/kubernetes-resource-staging-server.yaml
index 11f5d3a13b9e..025b9b125d9e 100644
--- a/conf/kubernetes-resource-staging-server.yaml
+++ b/conf/kubernetes-resource-staging-server.yaml
@@ -32,7 +32,7 @@ spec:
           name: spark-resource-staging-server-config
       containers:
       - name: spark-resource-staging-server
-        image: kubespark/spark-resource-staging-server:v2.1.0-kubernetes-0.1.0-alpha.3
+        image: kubespark/spark-resource-staging-server:v2.1.0-kubernetes-0.2.0
         resources:
           requests:
             cpu: 100m
diff --git a/conf/kubernetes-shuffle-service.yaml b/conf/kubernetes-shuffle-service.yaml
index c0cc310cf475..55c170b01a4f 100644
--- a/conf/kubernetes-shuffle-service.yaml
+++ b/conf/kubernetes-shuffle-service.yaml
@@ -38,7 +38,7 @@ spec:
         # This is an official image that is built
         # from the dockerfiles/shuffle directory
         # in the spark distribution.
-        image: spark-shuffle:latest
+        image: kubespark/spark-shuffle:v2.1.0-kubernetes-0.2.0
         imagePullPolicy: IfNotPresent
         volumeMounts:
         - mountPath: '/tmp'
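The two manifests above are rolled out with kubectl rather than through spark-submit. A short sketch of applying them and checking which image each pod actually runs, assuming kubectl is already pointed at the target cluster and the manifests deploy into the current namespace:

    kubectl apply -f conf/kubernetes-resource-staging-server.yaml
    kubectl apply -f conf/kubernetes-shuffle-service.yaml
    # List each pod alongside the image(s) it runs, to confirm the 0.2.0 tags took effect
    kubectl get pods -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].image}{"\n"}{end}'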