Commit dea7bc4

[SPARK-32100][CORE][TESTS][FOLLOWUP] Reduce the required test resources
### What changes were proposed in this pull request?

This PR aims to reduce the required test resources in WorkerDecommissionExtendedSuite.

### Why are the changes needed?

When the Jenkins farm is crowded, the following failure currently happens, as seen [here](https://amplab.cs.berkeley.edu/jenkins/view/Spark%20QA%20Test%20(Dashboard)/job/spark-master-test-sbt-hadoop-3.2-hive-2.3/890/testReport/junit/org.apache.spark.scheduler/WorkerDecommissionExtendedSuite/Worker_decommission_and_executor_idle_timeout/):

```
java.util.concurrent.TimeoutException: Can't find 20 executors before 60000 milliseconds elapsed
	at org.apache.spark.TestUtils$.waitUntilExecutorsUp(TestUtils.scala:326)
	at org.apache.spark.scheduler.WorkerDecommissionExtendedSuite.$anonfun$new$2(WorkerDecommissionExtendedSuite.scala:45)
```

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

Pass the Jenkins.

Closes apache#29001 from dongjoon-hyun/SPARK-32100-2.

Authored-by: Dongjoon Hyun <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
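For readers unfamiliar with the `local-cluster[numWorkers,coresPerWorker,memoryPerWorkerMB]` master string, the sketch below shows roughly what the reduced setup amounts to using plain string configuration keys. It is an illustrative approximation only: the string keys and the standalone `main` are assumptions for this sketch, while the suite itself uses Spark's internal config constants shown in the diff below, and it additionally enables worker decommissioning.

```scala
import org.apache.spark.{SparkConf, SparkContext}

// Illustrative sketch only. local-cluster mode launches real Worker JVMs and
// assumes a local Spark build/distribution is available; the real suite also
// sets the worker-decommission flag via its internal config constant.
object ReducedClusterSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("WorkerDecommissionExtendedSuite-sketch")
      // 5 workers, 1 core and 512 MB each, instead of the previous 20 workers.
      .setMaster("local-cluster[5,1,512]")
      .set("spark.executor.memory", "512m")
      .set("spark.dynamicAllocation.enabled", "true")
      .set("spark.dynamicAllocation.shuffleTracking.enabled", "true")
      .set("spark.dynamicAllocation.initialExecutors", "5")

    val sc = new SparkContext(conf)
    try {
      // Same toy job shape as the test: a small shuffle that exercises the executors.
      val counts = sc.parallelize(1 to 10, 2).map(x => (1, x)).reduceByKey(_ + _)
      println(counts.collect().toSeq)
    } finally {
      sc.stop()
    }
  }
}
```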
1 parent 0e33b5e commit dea7bc4

1 file changed: +5 -5 lines changed

core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionExtendedSuite.scala

Lines changed: 5 additions & 5 deletions
```diff
@@ -32,17 +32,17 @@ import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
 class WorkerDecommissionExtendedSuite extends SparkFunSuite with LocalSparkContext {
   private val conf = new org.apache.spark.SparkConf()
     .setAppName(getClass.getName)
-    .set(SPARK_MASTER, "local-cluster[20,1,512]")
+    .set(SPARK_MASTER, "local-cluster[5,1,512]")
     .set(EXECUTOR_MEMORY, "512m")
     .set(DYN_ALLOCATION_ENABLED, true)
     .set(DYN_ALLOCATION_SHUFFLE_TRACKING_ENABLED, true)
-    .set(DYN_ALLOCATION_INITIAL_EXECUTORS, 20)
+    .set(DYN_ALLOCATION_INITIAL_EXECUTORS, 5)
     .set(WORKER_DECOMMISSION_ENABLED, true)

   test("Worker decommission and executor idle timeout") {
     sc = new SparkContext(conf.set(DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT.key, "10s"))
     withSpark(sc) { sc =>
-      TestUtils.waitUntilExecutorsUp(sc, 20, 60000)
+      TestUtils.waitUntilExecutorsUp(sc, 5, 60000)
       val rdd1 = sc.parallelize(1 to 10, 2)
       val rdd2 = rdd1.map(x => (1, x))
       val rdd3 = rdd2.reduceByKey(_ + _)
@@ -54,10 +54,10 @@ class WorkerDecommissionExtendedSuite extends SparkFunSuite with LocalSparkContext {
     }
   }

-  test("Decommission 19 executors from 20 executors in total") {
+  test("Decommission 4 executors from 5 executors in total") {
     sc = new SparkContext(conf)
     withSpark(sc) { sc =>
-      TestUtils.waitUntilExecutorsUp(sc, 20, 60000)
+      TestUtils.waitUntilExecutorsUp(sc, 5, 60000)
       val rdd1 = sc.parallelize(1 to 100000, 200)
       val rdd2 = rdd1.map(x => (x % 100, x))
       val rdd3 = rdd2.reduceByKey(_ + _)
```
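For context on the wait that was timing out, the snippet below is a rough, hedged sketch of how a test can poll for a target executor count using the public `SparkStatusTracker` API. It is an illustrative approximation, not the actual implementation of `TestUtils.waitUntilExecutorsUp`, whose internals may differ.

```scala
import org.apache.spark.SparkContext

// Hypothetical helper for illustration only; Spark's real TestUtils.waitUntilExecutorsUp
// lives in the test utilities and is implemented differently in detail.
object ExecutorWaitSketch {
  def waitForExecutors(sc: SparkContext, target: Int, timeoutMs: Long): Unit = {
    val deadline = System.currentTimeMillis() + timeoutMs
    while (System.currentTimeMillis() < deadline) {
      // getExecutorInfos includes the driver, so subtract one to count executors only.
      val executors = sc.statusTracker.getExecutorInfos.length - 1
      if (executors >= target) return
      Thread.sleep(100)
    }
    throw new java.util.concurrent.TimeoutException(
      s"Can't find $target executors before $timeoutMs milliseconds elapsed")
  }
}
```

With only 5 executors at 512 MB per worker, this wait is far less likely to exhaust the 60-second budget on a crowded Jenkins host than the previous 20-executor setup.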
