@@ -103,6 +103,9 @@ def parse_args():
       help="When destroying a cluster, delete the security groups that were created")
   parser.add_option("--use-existing-master", action="store_true", default=False,
       help="Launch fresh slaves, but use an existing stopped master if possible")
+  parser.add_option("--worker-instances", type="int", default=1,
+      help="Number of instances per worker: variable SPARK_WORKER_INSTANCES (default: 1)")
+
 
   (opts, args) = parser.parse_args()
   if len(args) != 2:
@@ -223,7 +226,7 @@ def launch_cluster(conn, opts, cluster_name):
     sys.exit(1)
   if opts.key_pair is None:
     print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances."
-    sys.exit(1)
+    sys.exit(1)
   print "Setting up security groups..."
   master_group = get_or_make_group(conn, cluster_name + "-master")
   slave_group = get_or_make_group(conn, cluster_name + "-slaves")
@@ -551,7 +554,8 @@ def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
551554 "modules" : '\n ' .join (modules ),
552555 "spark_version" : spark_v ,
553556 "shark_version" : shark_v ,
554- "hadoop_major_version" : opts .hadoop_major_version
557+ "hadoop_major_version" : opts .hadoop_major_version ,
558+ "spark_worker_instances" : opts .worker_instances
555559 }
556560
557561 # Create a temp directory in which we will place all the files to be
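
For context, a minimal sketch of how the new spark_worker_instances template variable would typically reach the cluster configuration; the {{key}} substitution convention and the sample spark-env.sh template line are assumptions for illustration, not shown in this diff.

# Standalone sketch: expand "{{key}}" placeholders the way deploy_files is
# assumed to render its deployment templates. Values here are placeholders.
template_vars = {
    "hadoop_major_version": "1",    # would come from opts.hadoop_major_version
    "spark_worker_instances": "2",  # would come from opts.worker_instances
}

# Hypothetical line from a spark-env.sh template:
template_text = "export SPARK_WORKER_INSTANCES={{spark_worker_instances}}\n"

rendered = template_text
for key, value in template_vars.items():
    rendered = rendered.replace("{{" + key + "}}", value)

print(rendered)  # -> export SPARK_WORKER_INSTANCES=2

On each slave, SPARK_WORKER_INSTANCES controls how many standalone worker processes are started, which is what the new --worker-instances flag ultimately configures.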