@@ -32,17 +32,17 @@ import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
 class WorkerDecommissionExtendedSuite extends SparkFunSuite with LocalSparkContext {
   private val conf = new org.apache.spark.SparkConf()
     .setAppName(getClass.getName)
-    .set(SPARK_MASTER, "local-cluster[20,1,512]")
+    .set(SPARK_MASTER, "local-cluster[5,1,512]")
     .set(EXECUTOR_MEMORY, "512m")
     .set(DYN_ALLOCATION_ENABLED, true)
     .set(DYN_ALLOCATION_SHUFFLE_TRACKING_ENABLED, true)
-    .set(DYN_ALLOCATION_INITIAL_EXECUTORS, 20)
+    .set(DYN_ALLOCATION_INITIAL_EXECUTORS, 5)
     .set(WORKER_DECOMMISSION_ENABLED, true)
 
   test("Worker decommission and executor idle timeout") {
     sc = new SparkContext(conf.set(DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT.key, "10s"))
     withSpark(sc) { sc =>
-      TestUtils.waitUntilExecutorsUp(sc, 20, 60000)
+      TestUtils.waitUntilExecutorsUp(sc, 5, 60000)
       val rdd1 = sc.parallelize(1 to 10, 2)
       val rdd2 = rdd1.map(x => (1, x))
       val rdd3 = rdd2.reduceByKey(_ + _)
@@ -54,10 +54,10 @@ class WorkerDecommissionExtendedSuite extends SparkFunSuite with LocalSparkContext {
     }
   }
 
-  test("Decommission 19 executors from 20 executors in total") {
+  test("Decommission 4 executors from 5 executors in total") {
     sc = new SparkContext(conf)
     withSpark(sc) { sc =>
-      TestUtils.waitUntilExecutorsUp(sc, 20, 60000)
+      TestUtils.waitUntilExecutorsUp(sc, 5, 60000)
       val rdd1 = sc.parallelize(1 to 100000, 200)
       val rdd2 = rdd1.map(x => (x % 100, x))
       val rdd3 = rdd2.reduceByKey(_ + _)