From 26697fc545ba816dedc789b186a26c9d8636f4e6 Mon Sep 17 00:00:00 2001 From: Rob Vesse Date: Mon, 12 Nov 2018 13:44:03 +0000 Subject: [PATCH 1/8] [SPARK-26015][K8S] Set a default UID for Spark on K8S Images Adds USER directives to the Dockerfiles, with the UID configurable via a build argument. A -u flag is added to bin/docker-image-tool.sh to make it easy to customise this. --- bin/docker-image-tool.sh | 15 ++++++++++++--- .../docker/src/main/dockerfiles/spark/Dockerfile | 6 ++++++ .../main/dockerfiles/spark/bindings/R/Dockerfile | 9 +++++++++ .../dockerfiles/spark/bindings/python/Dockerfile | 9 +++++++++ .../src/main/dockerfiles/spark/entrypoint.sh | 4 ++++ 5 files changed, 40 insertions(+), 3 deletions(-) diff --git a/bin/docker-image-tool.sh b/bin/docker-image-tool.sh index 9f735f1148da..79216244377e 100755 --- a/bin/docker-image-tool.sh +++ b/bin/docker-image-tool.sh @@ -132,6 +132,11 @@ function build { SPARK_ROOT="$CTX_DIR/base" fi + # If a custom SPARK_UID was set add it to build arguments + if [ -n "$SPARK_UID" ]; then + BUILD_ARGS+=(--build-arg spark_uid=$SPARK_UID) + fi + # Verify that the Docker image content directory is present if [ ! -d "$SPARK_ROOT/kubernetes/dockerfiles" ]; then error "Cannot find docker image. This script must be run from a runnable distribution of Apache Spark." @@ -207,8 +212,10 @@ Options: -t tag Tag to apply to the built image, or to identify the image to be pushed. -m Use minikube's Docker daemon. -n Build docker image with --no-cache - -b arg Build arg to build or push the image. For multiple build args, this option needs to - be used separately for each build arg. + -u uid UID to use in the USER directive to set the user the main Spark process runs as inside the + resulting container + -b arg Build arg to build or push the image. For multiple build args, this option needs to + be used separately for each build arg. Using minikube when building images will do so directly into minikube's Docker daemon. There is no need to push the images into minikube in that case, they'll be automatically @@ -243,7 +250,8 @@ PYDOCKERFILE= RDOCKERFILE= NOCACHEARG= BUILD_PARAMS= -while getopts f:p:R:mr:t:nb: option +SPARK_UID= +while getopts f:p:R:mr:t:nb:u: option do case "${option}" in @@ -263,6 +271,7 @@ do fi eval $(minikube docker-env) ;; + u) SPARK_UID=${OPTARG};; esac done diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile index 89b20e144622..90346a3386c1 100644 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile @@ -17,6 +17,8 @@ FROM openjdk:8-alpine +ARG spark_uid=185 + # Before building the docker image, first build and make a Spark distribution following # the instructions in http://spark.apache.org/docs/latest/building-spark.html.
# If this docker file is being used in the context of building your images from a Spark @@ -47,5 +49,9 @@ COPY data /opt/spark/data ENV SPARK_HOME /opt/spark WORKDIR /opt/spark/work-dir +RUN chmod g+w /opt/spark/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] + +# Specify the User that the actual main process will run as +USER ${spark_uid} \ No newline at end of file diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile index 9f67422efeb3..9ded57c65510 100644 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile @@ -16,8 +16,14 @@ # ARG base_img +ARG spark_uid=185 + FROM $base_img WORKDIR / + +# Reset to root to run installation tasks +USER 0 + RUN mkdir ${SPARK_HOME}/R RUN apk add --no-cache R R-dev @@ -27,3 +33,6 @@ ENV R_HOME /usr/lib/R WORKDIR /opt/spark/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] + +# Specify the User that the actual main process will run as +USER ${spark_uid} diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile index 69b6efa6149a..de1a0617b1cc 100644 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile @@ -16,8 +16,14 @@ # ARG base_img +ARG spark_uid=185 + FROM $base_img WORKDIR / + +# Reset to root to run installation tasks +USER 0 + RUN mkdir ${SPARK_HOME}/python # TODO: Investigate running both pip and pip3 via virtualenvs RUN apk add --no-cache python && \ @@ -37,3 +43,6 @@ ENV PYTHONPATH ${SPARK_HOME}/python/lib/pyspark.zip:${SPARK_HOME}/python/lib/py4 WORKDIR /opt/spark/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] + +# Specify the User that the actual main process will run as +USER ${spark_uid} diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh index 2b2a4e4cf6bc..94440291317a 100755 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh @@ -30,6 +30,10 @@ set -e # If there is no passwd entry for the container UID, attempt to create one if [ -z "$uidentry" ] ; then if [ -w /etc/passwd ] ; then + # TODO Should we allow providing an environment variable to set the desired username? + # SPARK_USER_NAME is the obvious candidate here but we only propagate this to the + # pods when using Hadoop therefore we'd need to move that to a feature step that + # always runs e.g. 
Basic(Driver|Executor)FeatureStep echo "$myuid:x:$myuid:$mygid:anonymous uid:$SPARK_HOME:/bin/false" >> /etc/passwd else echo "Container ENTRYPOINT failed to add passwd entry for anonymous UID" From 6680c134043c922ab2e613177897dae3a0b6d672 Mon Sep 17 00:00:00 2001 From: Rob Vesse Date: Thu, 15 Nov 2018 14:14:27 +0000 Subject: [PATCH 2/8] [SPARK-26015][K8S] Fix broken client mode test The client mode test was incorrectly overriding the entry point of the image, so it did not benefit from the logic that sets up the /etc/passwd entry for the container UID. As a result the container had no home directory and Ivy setup failed. --- .../deploy/k8s/integrationtest/ClientModeTestsSuite.scala | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala index c8bd584516ea..45e0277248e9 100644 --- a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala +++ b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala @@ -17,13 +17,13 @@ package org.apache.spark.deploy.k8s.integrationtest import org.scalatest.concurrent.Eventually -import scala.collection.JavaConverters._ import org.apache.spark.deploy.k8s.integrationtest.KubernetesSuite.{k8sTestTag, INTERVAL, TIMEOUT} +import org.scalatest.Tag private[spark] trait ClientModeTestsSuite { k8sSuite: KubernetesSuite => - test("Run in client mode.", k8sTestTag) { + test("Run in client mode.", k8sTestTag, Tag("k8s-client-mode")) { val labels = Map("spark-app-selector" -> driverPodName) val driverPort = 7077 val blockManagerPort = 10000 @@ -62,11 +62,12 @@ private[spark] trait ClientModeTestsSuite { k8sSuite: KubernetesSuite => .endMetadata() .withNewSpec() .withServiceAccountName(kubernetesTestComponents.serviceAccountName) + .withRestartPolicy("Never") .addNewContainer() .withName("spark-example") .withImage(image) .withImagePullPolicy("IfNotPresent") - .withCommand("/opt/spark/bin/run-example") + .addToArgs("/opt/spark/bin/run-example") .addToArgs("--master", s"k8s://https://kubernetes.default.svc") .addToArgs("--deploy-mode", "client") .addToArgs("--conf", s"spark.kubernetes.container.image=$image") From 11419e34b6c9d01208c91f3d58ebd34e71a374c4 Mon Sep 17 00:00:00 2001 From: Rob Vesse Date: Thu, 15 Nov 2018 14:16:02 +0000 Subject: [PATCH 3/8] [SPARK-26015][K8S] If SPARK_USER_NAME is present use it If SPARK_USER_NAME is set for the pod, use it as part of the /etc/passwd entry we create. --- .../docker/src/main/dockerfiles/spark/entrypoint.sh | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh index 94440291317a..2d770075a074 100755 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh @@ -30,11 +30,7 @@ set -e # If there is no passwd entry for the container UID, attempt to create one if [ -z "$uidentry" ] ; then if [ -w /etc/passwd ] ; then - # TODO Should we allow providing an environment variable to set the desired username?
- # SPARK_USER_NAME is the obvious candidate here but we only propagate this to the - # pods when using Hadoop therefore we'd need to move that to a feature step that - # always runs e.g. Basic(Driver|Executor)FeatureStep - echo "$myuid:x:$myuid:$mygid:anonymous uid:$SPARK_HOME:/bin/false" >> /etc/passwd + echo "$myuid:x:$myuid:$mygid:${SPARK_USER_NAME:-anonymous uid}:$SPARK_HOME:/bin/false" >> /etc/passwd else echo "Container ENTRYPOINT failed to add passwd entry for anonymous UID" fi From db1e83ade775a931d3f64b811e946b19ade0eacb Mon Sep 17 00:00:00 2001 From: Rob Vesse Date: Fri, 16 Nov 2018 09:50:21 +0000 Subject: [PATCH 4/8] [SPARK-26015][K8S] Address PR Comments - Add line breaks for clarity - Remove extra test tag --- .../kubernetes/docker/src/main/dockerfiles/spark/Dockerfile | 1 + .../docker/src/main/dockerfiles/spark/bindings/R/Dockerfile | 2 ++ .../src/main/dockerfiles/spark/bindings/python/Dockerfile | 2 ++ .../deploy/k8s/integrationtest/ClientModeTestsSuite.scala | 3 +-- 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile index 90346a3386c1..063930d1816c 100644 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile @@ -54,4 +54,5 @@ RUN chmod g+w /opt/spark/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] # Specify the User that the actual main process will run as + USER ${spark_uid} \ No newline at end of file diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile index 9ded57c65510..477bb4a027a5 100644 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile @@ -22,6 +22,7 @@ FROM $base_img WORKDIR / # Reset to root to run installation tasks + USER 0 RUN mkdir ${SPARK_HOME}/R @@ -35,4 +36,5 @@ WORKDIR /opt/spark/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] # Specify the User that the actual main process will run as + USER ${spark_uid} diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile index de1a0617b1cc..7dda321bb6c8 100644 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile @@ -22,6 +22,7 @@ FROM $base_img WORKDIR / # Reset to root to run installation tasks + USER 0 RUN mkdir ${SPARK_HOME}/python @@ -45,4 +46,5 @@ WORKDIR /opt/spark/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] # Specify the User that the actual main process will run as + USER ${spark_uid} diff --git a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala index 45e0277248e9..7aec3a1b6f57 100644 --- a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala +++ 
b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala @@ -19,11 +19,10 @@ package org.apache.spark.deploy.k8s.integrationtest import org.scalatest.concurrent.Eventually import org.apache.spark.deploy.k8s.integrationtest.KubernetesSuite.{k8sTestTag, INTERVAL, TIMEOUT} -import org.scalatest.Tag private[spark] trait ClientModeTestsSuite { k8sSuite: KubernetesSuite => - test("Run in client mode.", k8sTestTag, Tag("k8s-client-mode")) { + test("Run in client mode.", k8sTestTag) { val labels = Map("spark-app-selector" -> driverPodName) val driverPort = 7077 val blockManagerPort = 10000 From fd37bdd002a58d7451b84c442d26abd42ee7ce42 Mon Sep 17 00:00:00 2001 From: Rob Vesse Date: Fri, 16 Nov 2018 09:52:27 +0000 Subject: [PATCH 5/8] [SPARK-26015][K8S] Fix line breaks --- .../kubernetes/docker/src/main/dockerfiles/spark/Dockerfile | 3 +-- .../docker/src/main/dockerfiles/spark/bindings/R/Dockerfile | 2 -- .../src/main/dockerfiles/spark/bindings/python/Dockerfile | 2 -- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile index 063930d1816c..084304032470 100644 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/Dockerfile @@ -54,5 +54,4 @@ RUN chmod g+w /opt/spark/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] # Specify the User that the actual main process will run as - -USER ${spark_uid} \ No newline at end of file +USER ${spark_uid} diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile index 477bb4a027a5..9ded57c65510 100644 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/R/Dockerfile @@ -22,7 +22,6 @@ FROM $base_img WORKDIR / # Reset to root to run installation tasks - USER 0 RUN mkdir ${SPARK_HOME}/R @@ -36,5 +35,4 @@ WORKDIR /opt/spark/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] # Specify the User that the actual main process will run as - USER ${spark_uid} diff --git a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile index 7dda321bb6c8..de1a0617b1cc 100644 --- a/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile +++ b/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/bindings/python/Dockerfile @@ -22,7 +22,6 @@ FROM $base_img WORKDIR / # Reset to root to run installation tasks - USER 0 RUN mkdir ${SPARK_HOME}/python @@ -46,5 +45,4 @@ WORKDIR /opt/spark/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] # Specify the User that the actual main process will run as - USER ${spark_uid} From 6c6232e6366ea31bc6b126817d0383743f3bb382 Mon Sep 17 00:00:00 2001 From: Rob Vesse Date: Tue, 20 Nov 2018 11:15:39 +0000 Subject: [PATCH 6/8] [SPARK-26015][K8S] Updates docs for USER changes --- docs/running-on-kubernetes.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/running-on-kubernetes.md b/docs/running-on-kubernetes.md index 2c01e1e7155e..3c2f934584e2 100644 --- a/docs/running-on-kubernetes.md +++ 
b/docs/running-on-kubernetes.md @@ -19,9 +19,9 @@ Please see [Spark Security](security.html) and the specific advice below before ## User Identity -Images built from the project provided Dockerfiles do not contain any [`USER`](https://docs.docker.com/engine/reference/builder/#user) directives. This means that the resulting images will be running the Spark processes as `root` inside the container. On unsecured clusters this may provide an attack vector for privilege escalation and container breakout. Therefore security conscious deployments should consider providing custom images with `USER` directives specifying an unprivileged UID and GID. +Images built from the project provided Dockerfiles contain a default [`USER`](https://docs.docker.com/engine/reference/builder/#user) directive with a default UID of `185`. This means that the resulting images will be running the Spark processes as this UID inside the container. Security conscious deployments should consider providing custom images with `USER` directives specifying their desired unprivileged UID and GID. The resulting UID should include the root group in its supplementary groups in order to be able to run the Spark executables. Users building their own images with the provided `docker-image-tool.sh` script can use the `-u <uid>` option to specify the desired UID. -Alternatively the [Pod Template](#pod-template) feature can be used to add a [Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#volumes-and-file-systems) with a `runAsUser` to the pods that Spark submits. Please bear in mind that this requires cooperation from your users and as such may not be a suitable solution for shared environments. Cluster administrators should use [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups) if they wish to limit the users that pods may run as. +Alternatively the [Pod Template](#pod-template) feature can be used to add a [Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#volumes-and-file-systems) with a `runAsUser` to the pods that Spark submits. This can be used to override the `USER` directives in the images themselves. Please bear in mind that this requires cooperation from your users and as such may not be a suitable solution for shared environments. Cluster administrators should use [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups) if they wish to limit the users that pods may run as. ## Volume Mounts @@ -87,6 +87,7 @@ Example usage is: $ ./bin/docker-image-tool.sh -r <repo> -t my-tag build $ ./bin/docker-image-tool.sh -r <repo> -t my-tag push ``` +This will build using the project's provided default `Dockerfiles`. To see more options available for customising the behaviour of this tool, including providing custom `Dockerfiles`, please run with the `-h` flag. By default `bin/docker-image-tool.sh` builds docker image for running JVM jobs. You need to opt-in to build additional language binding docker images.
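[Editor's note] To make the workflow these patches enable concrete, here is a minimal usage sketch of the new `-u` flag; the repository name `myrepo` and UID `1000` are illustrative placeholders, not values taken from the patches:

```
# Build the JVM image with a custom UID; the -u value is forwarded as the
# spark_uid build argument that the Dockerfiles consume in their USER directives
$ ./bin/docker-image-tool.sh -r myrepo -t my-tag -u 1000 build
$ ./bin/docker-image-tool.sh -r myrepo -t my-tag push
```

The UID only needs to be supplied at build time; at runtime a pod template with `runAsUser` can still override it, as described in the User Identity docs above.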
From 72fb30c2decac0fe9406bced518b10730cf33006 Mon Sep 17 00:00:00 2001 From: Rob Vesse Date: Tue, 20 Nov 2018 11:18:56 +0000 Subject: [PATCH 7/8] [SPARK-26015][K8S] Fix incorrectly removed import --- .../spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala index 7aec3a1b6f57..2720cdf74ca8 100644 --- a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala +++ b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ClientModeTestsSuite.scala @@ -17,6 +17,7 @@ package org.apache.spark.deploy.k8s.integrationtest import org.scalatest.concurrent.Eventually +import scala.collection.JavaConverters._ import org.apache.spark.deploy.k8s.integrationtest.KubernetesSuite.{k8sTestTag, INTERVAL, TIMEOUT} From 8ab866b8f872a1d1ac0c43b374048c05893824c9 Mon Sep 17 00:00:00 2001 From: Rob Vesse Date: Thu, 29 Nov 2018 11:12:36 +0000 Subject: [PATCH 8/8] [SPARK-26015][K8S] Fix up changes to align with master --- bin/docker-image-tool.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/bin/docker-image-tool.sh b/bin/docker-image-tool.sh index 79216244377e..fbf9c9e448fd 100755 --- a/bin/docker-image-tool.sh +++ b/bin/docker-image-tool.sh @@ -132,11 +132,6 @@ function build { SPARK_ROOT="$CTX_DIR/base" fi - # If a custom SPARK_UID was set add it to build arguments - if [ -n "$SPARK_UID" ]; then - BUILD_ARGS+=(--build-arg spark_uid=$SPARK_UID) - fi - # Verify that the Docker image content directory is present if [ ! -d "$SPARK_ROOT/kubernetes/dockerfiles" ]; then error "Cannot find docker image. This script must be run from a runnable distribution of Apache Spark." @@ -151,6 +146,12 @@ function build { fi local BUILD_ARGS=(${BUILD_PARAMS}) + + # If a custom SPARK_UID was set add it to build arguments + if [ -n "$SPARK_UID" ]; then + BUILD_ARGS+=(--build-arg spark_uid=$SPARK_UID) + fi + local BINDING_BUILD_ARGS=( ${BUILD_PARAMS} --build-arg