
Commit 3790334

Address comments
1 parent 8b29c74 commit 3790334

File tree

1 file changed: +5 -5 lines changed

core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionExtendedSuite.scala

Lines changed: 5 additions & 5 deletions
@@ -32,17 +32,17 @@ import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
 class WorkerDecommissionExtendedSuite extends SparkFunSuite with LocalSparkContext {
   private val conf = new org.apache.spark.SparkConf()
     .setAppName(getClass.getName)
-    .set(SPARK_MASTER, "local-cluster[10,1,512]")
+    .set(SPARK_MASTER, "local-cluster[5,1,512]")
     .set(EXECUTOR_MEMORY, "512m")
     .set(DYN_ALLOCATION_ENABLED, true)
     .set(DYN_ALLOCATION_SHUFFLE_TRACKING_ENABLED, true)
-    .set(DYN_ALLOCATION_INITIAL_EXECUTORS, 10)
+    .set(DYN_ALLOCATION_INITIAL_EXECUTORS, 5)
     .set(WORKER_DECOMMISSION_ENABLED, true)
 
   test("Worker decommission and executor idle timeout") {
     sc = new SparkContext(conf.set(DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT.key, "10s"))
     withSpark(sc) { sc =>
-      TestUtils.waitUntilExecutorsUp(sc, 10, 60000)
+      TestUtils.waitUntilExecutorsUp(sc, 5, 60000)
       val rdd1 = sc.parallelize(1 to 10, 2)
       val rdd2 = rdd1.map(x => (1, x))
       val rdd3 = rdd2.reduceByKey(_ + _)
@@ -54,10 +54,10 @@ class WorkerDecommissionExtendedSuite extends SparkFunSuite with LocalSparkConte
     }
   }
 
-  test("Decommission 9 executors from 10 executors in total") {
+  test("Decommission 4 executors from 5 executors in total") {
     sc = new SparkContext(conf)
     withSpark(sc) { sc =>
-      TestUtils.waitUntilExecutorsUp(sc, 10, 60000)
+      TestUtils.waitUntilExecutorsUp(sc, 5, 60000)
       val rdd1 = sc.parallelize(1 to 100000, 200)
       val rdd2 = rdd1.map(x => (x % 100, x))
       val rdd3 = rdd2.reduceByKey(_ + _)
