
Commit 2ba9b6a

tritab authored and srowen committed
[SPARK-11518][DEPLOY, WINDOWS] Handle spaces in Windows command scripts
[SPARK-11518][DEPLOY, WINDOWS] Handle spaces in Windows command scripts

Author: Jon Maurer <[email protected]>
Author: Jonathan Maurer <[email protected]>

Closes #10789 from tritab/cmd_updates.
1 parent 9269036 commit 2ba9b6a

14 files changed: 27 additions, 30 deletions
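
Most of the changes follow a single pattern: every script path built from %SPARK_HOME% or %~dp0 is wrapped in double quotes before being handed to cmd or call, so an installation whose path contains spaces no longer breaks. A minimal sketch of the failure and the fix, assuming a purely illustrative install location (not taken from the commit):

rem Illustrative install path containing a space.
set SPARK_HOME=C:\Program Files\spark

rem Unquoted, cmd splits the path at the space and tries to run C:\Program,
rem which typically fails with "'C:\Program' is not recognized ...".
cmd /V /E /C %SPARK_HOME%\bin\spark-class.cmd org.apache.hive.beeline.BeeLine

rem Quoted, the whole path travels as one token and resolves correctly.
cmd /V /E /C "%SPARK_HOME%\bin\spark-class.cmd" org.apache.hive.beeline.BeeLine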

bin/beeline.cmd

Lines changed: 1 addition & 1 deletion
@@ -18,4 +18,4 @@ rem limitations under the License.
 rem
 
 set SPARK_HOME=%~dp0..
-cmd /V /E /C %SPARK_HOME%\bin\spark-class.cmd org.apache.hive.beeline.BeeLine %*
+cmd /V /E /C "%SPARK_HOME%\bin\spark-class.cmd" org.apache.hive.beeline.BeeLine %*

bin/load-spark-env.cmd

Lines changed: 3 additions & 3 deletions
@@ -27,16 +27,16 @@ if [%SPARK_ENV_LOADED%] == [] (
 if not [%SPARK_CONF_DIR%] == [] (
 set user_conf_dir=%SPARK_CONF_DIR%
 ) else (
-set user_conf_dir=%~dp0..\conf
+set user_conf_dir=..\conf
 )
 
 call :LoadSparkEnv
 )
 
 rem Setting SPARK_SCALA_VERSION if not already set.
 
-set ASSEMBLY_DIR2=%SPARK_HOME%/assembly/target/scala-2.11
-set ASSEMBLY_DIR1=%SPARK_HOME%/assembly/target/scala-2.10
+set ASSEMBLY_DIR2="%SPARK_HOME%\assembly\target\scala-2.11"
+set ASSEMBLY_DIR1="%SPARK_HOME%\assembly\target\scala-2.10"
 
 if [%SPARK_SCALA_VERSION%] == [] (
 
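One detail of the ASSEMBLY_DIR1/ASSEMBLY_DIR2 change above: in cmd, quotes written on the right-hand side of set become part of the stored value, so later expansions of these variables carry their own quoting (the conf directory, by contrast, simply switches to a relative ..\conf path). A minimal sketch with an illustrative path:

rem Illustrative path only.
set SPARK_HOME=C:\path with spaces\spark
set ASSEMBLY_DIR2="%SPARK_HOME%\assembly\target\scala-2.11"
echo %ASSEMBLY_DIR2%
rem Prints the value with the quotes included:
rem "C:\path with spaces\spark\assembly\target\scala-2.11"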

bin/pyspark.cmd

Lines changed: 1 addition & 1 deletion
@@ -20,4 +20,4 @@ rem
 rem This is the entry point for running PySpark. To avoid polluting the
 rem environment, it just launches a new cmd to do the real work.
 
-cmd /V /E /C %~dp0pyspark2.cmd %*
+cmd /V /E /C "%~dp0pyspark2.cmd" %*

bin/pyspark2.cmd

Lines changed: 2 additions & 2 deletions
@@ -20,7 +20,7 @@ rem
 rem Figure out where the Spark framework is installed
 set SPARK_HOME=%~dp0..
 
-call %SPARK_HOME%\bin\load-spark-env.cmd
+call "%SPARK_HOME%\bin\load-spark-env.cmd"
 set _SPARK_CMD_USAGE=Usage: bin\pyspark.cmd [options]
 
 rem Figure out which Python to use.
@@ -35,4 +35,4 @@ set PYTHONPATH=%SPARK_HOME%\python\lib\py4j-0.9.1-src.zip;%PYTHONPATH%
 set OLD_PYTHONSTARTUP=%PYTHONSTARTUP%
 set PYTHONSTARTUP=%SPARK_HOME%\python\pyspark\shell.py
 
-call %SPARK_HOME%\bin\spark-submit2.cmd pyspark-shell-main --name "PySparkShell" %*
+call "%SPARK_HOME%\bin\spark-submit2.cmd" pyspark-shell-main --name "PySparkShell" %*

bin/run-example.cmd

Lines changed: 1 addition & 1 deletion
@@ -20,4 +20,4 @@ rem
 rem This is the entry point for running a Spark example. To avoid polluting
 rem the environment, it just launches a new cmd to do the real work.
 
-cmd /V /E /C %~dp0run-example2.cmd %*
+cmd /V /E /C "%~dp0run-example2.cmd" %*

bin/run-example2.cmd

Lines changed: 6 additions & 9 deletions
@@ -20,12 +20,9 @@ rem
 set SCALA_VERSION=2.10
 
 rem Figure out where the Spark framework is installed
-set FWDIR=%~dp0..\
+set SPARK_HOME=%~dp0..
 
-rem Export this as SPARK_HOME
-set SPARK_HOME=%FWDIR%
-
-call %SPARK_HOME%\bin\load-spark-env.cmd
+call "%SPARK_HOME%\bin\load-spark-env.cmd"
 
 rem Test that an argument was given
 if not "x%1"=="x" goto arg_given
@@ -36,12 +33,12 @@ if not "x%1"=="x" goto arg_given
 goto exit
 :arg_given
 
-set EXAMPLES_DIR=%FWDIR%examples
+set EXAMPLES_DIR=%SPARK_HOME%\examples
 
 rem Figure out the JAR file that our examples were packaged into.
 set SPARK_EXAMPLES_JAR=
-if exist "%FWDIR%RELEASE" (
-for %%d in ("%FWDIR%lib\spark-examples*.jar") do (
+if exist "%SPARK_HOME%\RELEASE" (
+for %%d in ("%SPARK_HOME%\lib\spark-examples*.jar") do (
 set SPARK_EXAMPLES_JAR=%%d
 )
 ) else (
@@ -80,7 +77,7 @@ if "%~1" neq "" (
 )
 if defined ARGS set ARGS=%ARGS:~1%
 
-call "%FWDIR%bin\spark-submit.cmd" ^
+call "%SPARK_HOME%\bin\spark-submit.cmd" ^
 --master %EXAMPLE_MASTER% ^
 --class %EXAMPLE_CLASS% ^
 "%SPARK_EXAMPLES_JAR%" %ARGS%

bin/spark-class.cmd

Lines changed: 1 addition & 1 deletion
@@ -20,4 +20,4 @@ rem
 rem This is the entry point for running a Spark class. To avoid polluting
 rem the environment, it just launches a new cmd to do the real work.
 
-cmd /V /E /C %~dp0spark-class2.cmd %*
+cmd /V /E /C "%~dp0spark-class2.cmd" %*

bin/spark-class2.cmd

Lines changed: 5 additions & 5 deletions
@@ -20,7 +20,7 @@ rem
 rem Figure out where the Spark framework is installed
 set SPARK_HOME=%~dp0..
 
-call %SPARK_HOME%\bin\load-spark-env.cmd
+call "%SPARK_HOME%\bin\load-spark-env.cmd"
 
 rem Test that an argument was given
 if "x%1"=="x" (
@@ -32,9 +32,9 @@ rem Find assembly jar
 set SPARK_ASSEMBLY_JAR=0
 
 if exist "%SPARK_HOME%\RELEASE" (
-set ASSEMBLY_DIR=%SPARK_HOME%\lib
+set ASSEMBLY_DIR="%SPARK_HOME%\lib"
 ) else (
-set ASSEMBLY_DIR=%SPARK_HOME%\assembly\target\scala-%SPARK_SCALA_VERSION%
+set ASSEMBLY_DIR="%SPARK_HOME%\assembly\target\scala-%SPARK_SCALA_VERSION%"
 )
 
 for %%d in (%ASSEMBLY_DIR%\spark-assembly*hadoop*.jar) do (
@@ -50,7 +50,7 @@ set LAUNCH_CLASSPATH=%SPARK_ASSEMBLY_JAR%
 
 rem Add the launcher build dir to the classpath if requested.
 if not "x%SPARK_PREPEND_CLASSES%"=="x" (
-set LAUNCH_CLASSPATH=%SPARK_HOME%\launcher\target\scala-%SPARK_SCALA_VERSION%\classes;%LAUNCH_CLASSPATH%
+set LAUNCH_CLASSPATH="%SPARK_HOME%\launcher\target\scala-%SPARK_SCALA_VERSION%\classes;%LAUNCH_CLASSPATH%"
 )
 
 set _SPARK_ASSEMBLY=%SPARK_ASSEMBLY_JAR%
@@ -62,7 +62,7 @@ if not "x%JAVA_HOME%"=="x" set RUNNER=%JAVA_HOME%\bin\java
 rem The launcher library prints the command to be executed in a single line suitable for being
 rem executed by the batch interpreter. So read all the output of the launcher into a variable.
 set LAUNCHER_OUTPUT=%temp%\spark-class-launcher-output-%RANDOM%.txt
-"%RUNNER%" -cp %LAUNCH_CLASSPATH% org.apache.spark.launcher.Main %* > %LAUNCHER_OUTPUT%
+"%RUNNER%" -cp "%LAUNCH_CLASSPATH%" org.apache.spark.launcher.Main %* > %LAUNCHER_OUTPUT%
 for /f "tokens=*" %%i in (%LAUNCHER_OUTPUT%) do (
 set SPARK_CMD=%%i
 )
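
The java invocation gets the same treatment: quoting %LAUNCH_CLASSPATH% keeps a classpath containing spaces together as the single argument following -cp. A minimal sketch with illustrative values (the jar name is made up for the example):

rem Illustrative values only.
set RUNNER=java
set LAUNCH_CLASSPATH=C:\path with spaces\spark\lib\spark-assembly.jar

rem Unquoted, the space would split the classpath: java would see only
rem "C:\path" after -cp and treat the next token as the main class.
rem Quoted, the full path reaches the JVM intact:
"%RUNNER%" -cp "%LAUNCH_CLASSPATH%" org.apache.spark.launcher.Main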

bin/spark-shell.cmd

Lines changed: 1 addition & 1 deletion
@@ -20,4 +20,4 @@ rem
 rem This is the entry point for running Spark shell. To avoid polluting the
 rem environment, it just launches a new cmd to do the real work.
 
-cmd /V /E /C %~dp0spark-shell2.cmd %*
+cmd /V /E /C "%~dp0spark-shell2.cmd" %*

bin/spark-shell2.cmd

Lines changed: 1 addition & 1 deletion
@@ -32,4 +32,4 @@ if "x%SPARK_SUBMIT_OPTS%"=="x" (
 set SPARK_SUBMIT_OPTS="%SPARK_SUBMIT_OPTS% -Dscala.usejavacp=true"
 
 :run_shell
-%SPARK_HOME%\bin\spark-submit2.cmd --class org.apache.spark.repl.Main --name "Spark shell" %*
+"%SPARK_HOME%\bin\spark-submit2.cmd" --class org.apache.spark.repl.Main --name "Spark shell" %*
