From 156f711b5a9f3b3e942ac36f42d6d4cc6c44aa50 Mon Sep 17 00:00:00 2001
From: Nick White
Date: Fri, 6 May 2016 10:42:18 +0100
Subject: [PATCH] [SPARK-15176][Core] Add maxRunningTasks setting to Pools

Helps guarantee resource availability by (hierarchically) limiting the
number of tasks a given pool can run. Also adds support for specifying
the parent pool in the file referenced by "spark.scheduler.allocation.file".
---
 .../org/apache/spark/scheduler/Pool.scala     |   1 +
 .../apache/spark/scheduler/Schedulable.scala  |  13 ++
 .../spark/scheduler/SchedulableBuilder.scala  |  51 +++++--
 .../spark/scheduler/TaskSchedulerImpl.scala   |   2 +-
 .../spark/scheduler/TaskSetManager.scala      |   3 +-
 core/src/test/resources/fairscheduler.xml     |   2 +
 core/src/test/resources/nestedpool.xml        |  60 +++++++++
 .../apache/spark/scheduler/PoolSuite.scala    |  72 +++++++---
 .../spark/scheduler/TaskSetManagerSuite.scala | 125 +++++++++++++++++-
 docs/job-scheduling.md                        |   7 +-
 10 files changed, 302 insertions(+), 34 deletions(-)
 create mode 100644 core/src/test/resources/nestedpool.xml

diff --git a/core/src/main/scala/org/apache/spark/scheduler/Pool.scala b/core/src/main/scala/org/apache/spark/scheduler/Pool.scala
index 2a69a6c5e8790..7fe2feb884e47 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/Pool.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/Pool.scala
@@ -32,6 +32,7 @@ private[spark] class Pool(
     val poolName: String,
     val schedulingMode: SchedulingMode,
     initMinShare: Int,
+    val initMaxRunningTasks: Int,
     initWeight: Int)
   extends Schedulable with Logging {

diff --git a/core/src/main/scala/org/apache/spark/scheduler/Schedulable.scala b/core/src/main/scala/org/apache/spark/scheduler/Schedulable.scala
index b6f88ed0a93aa..bf98dda6ca2ca 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/Schedulable.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/Schedulable.scala
@@ -34,11 +34,24 @@ private[spark] trait Schedulable {
   def schedulingMode: SchedulingMode
   def weight: Int
   def minShare: Int
+  val initMaxRunningTasks: Int
   def runningTasks: Int
   def priority: Int
   def stageId: Int
   def name: String

+  /**
+   * How much space for new tasks is there in this Schedulable?
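+   * The headroom is this Schedulable's own limit (initMaxRunningTasks) minus
+   * its running task count, further capped by the headroom of every ancestor
+   * pool: a child with two free slots under a parent with one free slot can
+   * only start one more task.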
+   */
+  def maxRunningTasks: Int = {
+    val myMaxRunningTasks = math.max(0, initMaxRunningTasks - runningTasks)
+    if (parent == null) {
+      myMaxRunningTasks
+    } else {
+      math.min(myMaxRunningTasks, parent.maxRunningTasks)
+    }
+  }
+
   def addSchedulable(schedulable: Schedulable): Unit
   def removeSchedulable(schedulable: Schedulable): Unit
   def getSchedulableByName(name: String): Schedulable

diff --git a/core/src/main/scala/org/apache/spark/scheduler/SchedulableBuilder.scala b/core/src/main/scala/org/apache/spark/scheduler/SchedulableBuilder.scala
index 96325a0329f89..ea8f4859338b2 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/SchedulableBuilder.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/SchedulableBuilder.scala
@@ -59,12 +59,15 @@ private[spark] class FairSchedulableBuilder(val rootPool: Pool, conf: SparkConf)
   val FAIR_SCHEDULER_PROPERTIES = "spark.scheduler.pool"
   val DEFAULT_POOL_NAME = "default"
   val MINIMUM_SHARES_PROPERTY = "minShare"
+  val MAXIMUM_RUNNING_TASKS_PROPERTY = "maxRunningTasks"
   val SCHEDULING_MODE_PROPERTY = "schedulingMode"
+  val PARENT_PROPERTY = "parent"
   val WEIGHT_PROPERTY = "weight"
   val POOL_NAME_PROPERTY = "@name"
   val POOLS_PROPERTY = "pool"
   val DEFAULT_SCHEDULING_MODE = SchedulingMode.FIFO
   val DEFAULT_MINIMUM_SHARE = 0
+  val DEFAULT_MAXIMUM_RUNNING_TASKS = Int.MaxValue
   val DEFAULT_WEIGHT = 1

   override def buildPools() {
@@ -87,13 +90,20 @@ private[spark] class FairSchedulableBuilder(val rootPool: Pool, conf: SparkConf)
     buildDefaultPool()
   }

+  private def logPoolCreated(pool: Pool) =
+    logInfo(s"""Created pool ${pool.name},
+      | parent: ${Option(pool.parent).map(_.name).getOrElse("(none)")},
+      | schedulingMode: ${pool.schedulingMode},
+      | minShare: ${pool.minShare},
+      | maxRunningTasks: ${pool.maxRunningTasks},
+      | weight: ${pool.weight}""".stripMargin)
+
   private def buildDefaultPool() {
     if (rootPool.getSchedulableByName(DEFAULT_POOL_NAME) == null) {
       val pool = new Pool(DEFAULT_POOL_NAME, DEFAULT_SCHEDULING_MODE,
-        DEFAULT_MINIMUM_SHARE, DEFAULT_WEIGHT)
+        DEFAULT_MINIMUM_SHARE, DEFAULT_MAXIMUM_RUNNING_TASKS, DEFAULT_WEIGHT)
       rootPool.addSchedulable(pool)
-      logInfo("Created default pool %s, schedulingMode: %s, minShare: %d, weight: %d".format(
-        DEFAULT_POOL_NAME, DEFAULT_SCHEDULING_MODE, DEFAULT_MINIMUM_SHARE, DEFAULT_WEIGHT))
+      logPoolCreated(pool)
     }
   }

@@ -104,7 +114,9 @@ private[spark] class FairSchedulableBuilder(val rootPool: Pool, conf: SparkConf)
         val poolName = (poolNode \ POOL_NAME_PROPERTY).text
         var schedulingMode = DEFAULT_SCHEDULING_MODE
         var minShare = DEFAULT_MINIMUM_SHARE
+        var maxRunningTasks = DEFAULT_MAXIMUM_RUNNING_TASKS
         var weight = DEFAULT_WEIGHT
+        var parent = DEFAULT_POOL_NAME

         val xmlSchedulingMode = (poolNode \ SCHEDULING_MODE_PROPERTY).text
         if (xmlSchedulingMode != "") {
@@ -122,15 +134,30 @@ private[spark] class FairSchedulableBuilder(val rootPool: Pool, conf: SparkConf)
           minShare = xmlMinShare.toInt
         }

+        val xmlMaxRunningTasks = (poolNode \ MAXIMUM_RUNNING_TASKS_PROPERTY).text
+        if (xmlMaxRunningTasks != "") {
+          maxRunningTasks = xmlMaxRunningTasks.toInt
+        }
+
         val xmlWeight = (poolNode \ WEIGHT_PROPERTY).text
         if (xmlWeight != "") {
           weight = xmlWeight.toInt
         }

-        val pool = new Pool(poolName, schedulingMode, minShare, weight)
-        rootPool.addSchedulable(pool)
-        logInfo("Created pool %s, schedulingMode: %s, minShare: %d, weight: %d".format(
-          poolName, schedulingMode, minShare, weight))
+        val xmlParent = (poolNode \ PARENT_PROPERTY).text
+        if (xmlParent != "") {
+          parent = xmlParent
+        }
+
+        val pool = new Pool(poolName, schedulingMode, minShare,
+          maxRunningTasks, weight)
+        val parentPool = rootPool.getSchedulableByName(parent)
+        if (parentPool == null) {
+          logWarning(s"Couldn't find parent pool $parent for pool $poolName; adding it to the root pool")
+          rootPool.addSchedulable(pool)
+        } else {
+          parentPool.addSchedulable(pool)
+        }
+        logPoolCreated(pool)
       }
     }

@@ -143,11 +170,11 @@ private[spark] class FairSchedulableBuilder(val rootPool: Pool, conf: SparkConf)
       if (parentPool == null) {
         // we will create a new pool that user has configured in app
         // instead of being defined in xml file
-        parentPool = new Pool(poolName, DEFAULT_SCHEDULING_MODE,
-          DEFAULT_MINIMUM_SHARE, DEFAULT_WEIGHT)
-        rootPool.addSchedulable(parentPool)
-        logInfo("Created pool %s, schedulingMode: %s, minShare: %d, weight: %d".format(
-          poolName, DEFAULT_SCHEDULING_MODE, DEFAULT_MINIMUM_SHARE, DEFAULT_WEIGHT))
+        val pool = new Pool(poolName, DEFAULT_SCHEDULING_MODE,
+          DEFAULT_MINIMUM_SHARE, DEFAULT_MAXIMUM_RUNNING_TASKS, DEFAULT_WEIGHT)
+        rootPool.addSchedulable(pool)
+        logPoolCreated(pool)
+        parentPool = pool
       }
     }
     parentPool.addSchedulable(manager)

diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
index c3adc286851e5..a7b2df09e1c76 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
@@ -135,7 +135,7 @@ private[spark] class TaskSchedulerImpl(
   def initialize(backend: SchedulerBackend) {
     this.backend = backend
     // temporarily set rootPool name to empty
-    rootPool = new Pool("", schedulingMode, 0, 0)
+    rootPool = new Pool("", schedulingMode, 0, Int.MaxValue, 0)
     schedulableBuilder = {
       schedulingMode match {
         case SchedulingMode.FIFO =>

diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
index 2eedd201ca355..99f6c03d77969 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
@@ -97,6 +97,7 @@ private[spark] class TaskSetManager(
   var parent: Pool = null
   var totalResultSize = 0L
   var calculatedTasks = 0
+  val initMaxRunningTasks = Int.MaxValue

   val runningTasksSet = new HashSet[Long]

@@ -421,7 +422,7 @@ private[spark] class TaskSetManager(
       maxLocality: TaskLocality.TaskLocality)
     : Option[TaskDescription] =
   {
-    if (!isZombie) {
+    if (!isZombie && maxRunningTasks > 0) {
       val curTime = clock.getTimeMillis()

       var allowedLocality = maxLocality

diff --git a/core/src/test/resources/fairscheduler.xml b/core/src/test/resources/fairscheduler.xml
index 996ffb18640dc..9bb83c8504a31 100644
--- a/core/src/test/resources/fairscheduler.xml
+++ b/core/src/test/resources/fairscheduler.xml
@@ -19,11 +19,13 @@
   <pool name="1">
     <minShare>2</minShare>
+    <maxRunningTasks>512</maxRunningTasks>
    <weight>1</weight>
    <schedulingMode>FIFO</schedulingMode>
   </pool>
   <pool name="2">
     <minShare>3</minShare>
+    <maxRunningTasks>256</maxRunningTasks>
     <weight>1</weight>
     <schedulingMode>FIFO</schedulingMode>
   </pool>
 </allocations>

diff --git a/core/src/test/resources/nestedpool.xml b/core/src/test/resources/nestedpool.xml
new file mode 100644
index 0000000000000..2242076e99995
--- /dev/null
+++ b/core/src/test/resources/nestedpool.xml
@@ -0,0 +1,60 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~    http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+-->
+<?xml version="1.0"?>
+<allocations>
+  <pool name="0">
+    <minShare>3</minShare>
+    <maxRunningTasks>1024</maxRunningTasks>
+    <weight>1</weight>
+    <schedulingMode>FAIR</schedulingMode>
+  </pool>
+  <pool name="1">
+    <minShare>4</minShare>
+    <maxRunningTasks>128</maxRunningTasks>
+    <weight>1</weight>
+    <schedulingMode>FAIR</schedulingMode>
+  </pool>
+  <pool name="00">
+    <parent>0</parent>
+    <minShare>2</minShare>
+    <maxRunningTasks>512</maxRunningTasks>
+    <weight>2</weight>
+    <schedulingMode>FAIR</schedulingMode>
+  </pool>
+  <pool name="01">
+    <parent>0</parent>
+    <minShare>1</minShare>
+    <maxRunningTasks>128</maxRunningTasks>
+    <weight>1</weight>
+    <schedulingMode>FAIR</schedulingMode>
+  </pool>
+  <pool name="10">
+    <parent>1</parent>
+    <minShare>2</minShare>
+    <maxRunningTasks>256</maxRunningTasks>
+    <weight>2</weight>
+    <schedulingMode>FAIR</schedulingMode>
+  </pool>
+  <pool name="11">
+    <parent>1</parent>
+    <minShare>2</minShare>
+    <maxRunningTasks>64</maxRunningTasks>
+    <weight>1</weight>
+    <schedulingMode>FAIR</schedulingMode>
+  </pool>
+</allocations>

diff --git a/core/src/test/scala/org/apache/spark/scheduler/PoolSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/PoolSuite.scala
index 467796d7c24b0..46e6345edeb19 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/PoolSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/PoolSuite.scala
@@ -48,7 +48,7 @@ class PoolSuite extends SparkFunSuite with LocalSparkContext {
     sc = new SparkContext("local", "TaskSchedulerImplSuite")
     val taskScheduler = new TaskSchedulerImpl(sc)

-    val rootPool = new Pool("", SchedulingMode.FIFO, 0, 0)
+    val rootPool = new Pool("", SchedulingMode.FIFO, 0, Int.MaxValue, 0)
     val schedulableBuilder = new FIFOSchedulableBuilder(rootPool)
     schedulableBuilder.buildPools()

@@ -78,7 +78,7 @@ class PoolSuite extends SparkFunSuite with LocalSparkContext {
     sc = new SparkContext("local", "TaskSchedulerImplSuite", conf)
     val taskScheduler = new TaskSchedulerImpl(sc)

-    val rootPool = new Pool("", SchedulingMode.FAIR, 0, 0)
+    val rootPool = new Pool("", SchedulingMode.FAIR, 0, Int.MaxValue, 0)
     val schedulableBuilder = new FairSchedulableBuilder(rootPool, sc.conf)
     schedulableBuilder.buildPools()

@@ -88,10 +88,13 @@ class PoolSuite extends SparkFunSuite with LocalSparkContext {
     assert(rootPool.getSchedulableByName("2") != null)
     assert(rootPool.getSchedulableByName("3") != null)
     assert(rootPool.getSchedulableByName("1").minShare === 2)
+    assert(rootPool.getSchedulableByName("1").maxRunningTasks === 512)
     assert(rootPool.getSchedulableByName("1").weight === 1)
     assert(rootPool.getSchedulableByName("2").minShare === 3)
+    assert(rootPool.getSchedulableByName("2").maxRunningTasks === 256)
     assert(rootPool.getSchedulableByName("2").weight === 1)
     assert(rootPool.getSchedulableByName("3").minShare === 0)
+    assert(rootPool.getSchedulableByName("3").maxRunningTasks === Int.MaxValue)
     assert(rootPool.getSchedulableByName("3").weight === 1)

     val properties1 = new Properties()
@@ -134,24 +137,57 @@ class PoolSuite extends SparkFunSuite with LocalSparkContext {
   }

   test("Nested Pool Test") {
-    sc = new SparkContext("local", "TaskSchedulerImplSuite")
+    val xmlPath = getClass.getClassLoader.getResource("nestedpool.xml").getFile()
+    val conf = new SparkConf().set("spark.scheduler.allocation.file", xmlPath)
+    sc = new SparkContext("local", "TaskSchedulerImplSuite", conf)
     val taskScheduler = new TaskSchedulerImpl(sc)

-    val rootPool = new Pool("", SchedulingMode.FAIR, 0, 0)
-    val pool0 = new Pool("0", SchedulingMode.FAIR, 3, 1)
-    val pool1 = new Pool("1", SchedulingMode.FAIR, 4, 1)
-    rootPool.addSchedulable(pool0)
-    rootPool.addSchedulable(pool1)
-
-    val pool00 = new Pool("00", SchedulingMode.FAIR, 2, 2)
-    val pool01 = new Pool("01", SchedulingMode.FAIR, 1, 1)
-    pool0.addSchedulable(pool00)
-    pool0.addSchedulable(pool01)
-
-    val pool10 = new Pool("10", SchedulingMode.FAIR, 2, 2)
-    val pool11 = new Pool("11", SchedulingMode.FAIR, 2, 1)
-    pool1.addSchedulable(pool10)
-    pool1.addSchedulable(pool11)
+    val rootPool = new Pool("", SchedulingMode.FAIR, 0, Int.MaxValue, 0)
+    val schedulableBuilder = new FairSchedulableBuilder(rootPool, sc.conf)
+    schedulableBuilder.buildPools()
+
+    val pool0 = rootPool.getSchedulableByName("0")
+    val pool1 = rootPool.getSchedulableByName("1")
+    val pool00 = rootPool.getSchedulableByName("00")
+    val pool01 = rootPool.getSchedulableByName("01")
+    val pool10 = rootPool.getSchedulableByName("10")
+    val pool11 = rootPool.getSchedulableByName("11")
+
+    // Ensure that the XML file was read in correctly.
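+    // Note that maxRunningTasks reports the effective limit, i.e. the pool's
+    // own setting capped by the settings of all of its ancestors.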
+    assert(pool0 != null)
+    assert(pool1 != null)
+    assert(pool00 != null)
+    assert(pool01 != null)
+    assert(pool10 != null)
+    assert(pool11 != null)
+
+    assert(pool0.minShare === 3)
+    assert(pool0.maxRunningTasks === 1024)
+    assert(pool0.weight === 1)
+
+    assert(pool1.minShare === 4)
+    assert(pool1.maxRunningTasks === 128)
+    assert(pool1.weight === 1)
+
+    assert(pool00.minShare === 2)
+    assert(pool00.maxRunningTasks === 512)
+    assert(pool00.weight === 2)
+    assert(pool00.parent === pool0)
+
+    assert(pool01.minShare === 1)
+    assert(pool01.maxRunningTasks === 128)
+    assert(pool01.weight === 1)
+    assert(pool01.parent === pool0)
+
+    assert(pool10.minShare === 2)
+    assert(pool10.maxRunningTasks === 128) // not 256 due to parent
+    assert(pool10.weight === 2)
+    assert(pool10.parent === pool1)
+
+    assert(pool11.minShare === 2)
+    assert(pool11.maxRunningTasks === 64)
+    assert(pool11.weight === 1)
+    assert(pool11.parent === pool1)

     val taskSetManager000 = createTaskSetManager(0, 5, taskScheduler)
     val taskSetManager001 = createTaskSetManager(1, 5, taskScheduler)

diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala
index 1d7c8f4a61857..9081b5885fd1e 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala
@@ -318,6 +318,129 @@ class TaskSetManagerSuite extends SparkFunSuite with LocalSparkContext with Logg
     assert(manager.resourceOffer("exec2", "host2", ANY).get.index === 3)
   }

+  test("Scheduler respects maxRunningTasks setting of its pool") {
+    sc = new SparkContext("local", "test")
+    val sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
+    val clock = new ManualClock
+
+    // set up three pools to contend for three resources
+    val parent = new Pool("parent", SchedulingMode.FAIR, 0, 3, 0)
+    val child0 = new Pool("child0", SchedulingMode.FAIR, 0, 2, 0)
+    val child1 = new Pool("child1", SchedulingMode.FAIR, 0, 2, 0)
+    child0.parent = parent
+    child1.parent = parent
+
+    // add a taskset for each pool
+    val parentManager = new TaskSetManager(
+      sched, FakeTask.createTaskSet(5), MAX_TASK_FAILURES, clock)
+    val child0Manager = new TaskSetManager(
+      sched, FakeTask.createTaskSet(3), MAX_TASK_FAILURES, clock)
+    val child1Manager = new TaskSetManager(
+      sched, FakeTask.createTaskSet(3), MAX_TASK_FAILURES, clock)
+    parentManager.parent = parent
+    child0Manager.parent = child0
+    child1Manager.parent = child1
+
+    // the first child pool can start two tasks...
+    val c0t0 = child0Manager.resourceOffer("exec1", "host1", ANY).get
+    assert(c0t0.index === 0)
+    val c0t1 = child0Manager.resourceOffer("exec1", "host1", ANY).get
+    assert(c0t1.index === 1)
+
+    // but not three!
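+    // (child0's own limit is 2, so with two tasks running its headroom is 0)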
+    assert(child0Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    assert(child0.maxRunningTasks === 0)
+    assert(child1.maxRunningTasks === 1)
+    assert(parent.maxRunningTasks === 1)
+
+    // ...until one of the others finishes
+    child0Manager.handleSuccessfulTask(c0t0.taskId, createTaskResult())
+    val c0t2 = child0Manager.resourceOffer("exec1", "host1", ANY).get
+    assert(c0t2.index === 2)
+    assert(child0Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    // meanwhile, we can add a task to the other child pool
+    val c1t0 = child1Manager.resourceOffer("exec1", "host1", ANY).get
+    assert(c1t0.index === 0)
+    assert(child1Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    // as three are in use, we can't add any to the other pools
+    assert(parentManager.resourceOffer("exec1", "host1", ANY).isEmpty)
+    assert(child0Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    assert(child0.maxRunningTasks === 0)
+    assert(child1.maxRunningTasks === 0)
+    assert(parent.maxRunningTasks === 0)
+
+    // finish another child task, and add a task to the parent
+    child0Manager.handleSuccessfulTask(c0t1.taskId, createTaskResult())
+    val pt0 = parentManager.resourceOffer("exec1", "host1", ANY).get
+    assert(pt0.index === 0)
+    assert(parentManager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    // three tasks are running, so we can't add things to the child pools
+    assert(child0Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+    assert(child1Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    // finish the parent task. We now have one running in each child pool...
+    parentManager.handleSuccessfulTask(pt0.taskId, createTaskResult())
+
+    // so add a second task to child1
+    val c1t1 = child1Manager.resourceOffer("exec1", "host1", ANY).get
+    assert(c1t1.index === 1)
+    assert(child1Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    // check we still can't add tasks to the other pools
+    assert(parentManager.resourceOffer("exec1", "host1", ANY).isEmpty)
+    assert(child0Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    // finish child0's last task. It shouldn't schedule any more!
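+    // (child0Manager has launched all three tasks in its TaskSet, so it has
+    // nothing left to offer even though finishing c0t2 frees some headroom)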
+    child0Manager.handleSuccessfulTask(c0t2.taskId, createTaskResult())
+    assert(child0Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    // the child 1 pool is already running two tasks, so even if we offer it
+    // more resources it can't accept:
+    assert(child1Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    assert(child0.maxRunningTasks === 1) // limited by parent, otherwise would be 2
+    assert(child0Manager.runningTasks === 0)
+    assert(child1.maxRunningTasks === 0)
+    assert(child1Manager.runningTasks === 2)
+    assert(parent.maxRunningTasks === 1)
+    assert(parentManager.runningTasks === 0)
+
+    // ...so give it to the parent
+    val pt1 = parentManager.resourceOffer("exec1", "host1", ANY).get
+    assert(pt1.index === 1)
+    assert(parentManager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    // now, finish the remaining child tasks, and fill up the parent pool
+    child1Manager.handleSuccessfulTask(c1t0.taskId, createTaskResult())
+    child1Manager.handleSuccessfulTask(c1t1.taskId, createTaskResult())
+    val pt2 = parentManager.resourceOffer("exec1", "host1", ANY).get
+    val pt3 = parentManager.resourceOffer("exec1", "host1", ANY).get
+    assert(parentManager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    assert(child0.maxRunningTasks === 0)
+    assert(child0Manager.runningTasks === 0)
+    assert(child1.maxRunningTasks === 0)
+    assert(child1Manager.runningTasks === 0)
+    assert(parent.maxRunningTasks === 0)
+    assert(parentManager.runningTasks === 3)
+
+    // as the parent has used all three slots in the pool, the child pools
+    // can't run their tasks:
+    assert(child1Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+
+    // at least, not until a parent task finishes
+    parentManager.handleSuccessfulTask(pt1.taskId, createTaskResult())
+    val c1t2 = child1Manager.resourceOffer("exec1", "host1", ANY).get
+    assert(c1t2.index === 2)
+    assert(child1Manager.resourceOffer("exec1", "host1", ANY).isEmpty)
+  }
+
   test("delay scheduling with failed hosts") {
     sc = new SparkContext("local", "test")
     val sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"),
@@ -840,7 +963,7 @@ class TaskSetManagerSuite extends SparkFunSuite with LocalSparkContext with Logg
   }

   private def createTaskResult(
-      id: Int,
+      id: Int = 0xDEADBEEF, // default to a meaningless, yet obvious result value
       accumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty): DirectTaskResult[Int] = {
     val valueSer = SparkEnv.get.serializer.newInstance()
     new DirectTaskResult[Int](valueSer.serialize(id), accumUpdates)

diff --git a/docs/job-scheduling.md b/docs/job-scheduling.md
index 40b6cd99cc27f..01f11caa62bef 100644
--- a/docs/job-scheduling.md
+++ b/docs/job-scheduling.md
@@ -244,6 +244,11 @@ properties:
   The `minShare` property can therefore be another way to ensure that a pool can always get up to a
   certain number of resources (e.g. 10 cores) quickly without giving it a high priority
   for the rest of the cluster. By default, each pool's `minShare` is 0.
+* `maxRunningTasks`: Limits the number of tasks this pool can run concurrently, including all the
+  tasks run by nested pools. By default there is no limit.
+* `parent`: Makes this pool a child of the pool named in the `parent` element, as in the example
+  below. The parent pool must be declared before the child in the configuration file; if it isn't
+  (or if the parent isn't declared at all), the pool becomes a top-level pool.
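+
+  For example, a parent pool capped at 50 concurrent tasks whose child may use at most 10 of
+  those slots could be declared as follows (the pool names here are illustrative):
+
+        <pool name="parent">
+          <maxRunningTasks>50</maxRunningTasks>
+        </pool>
+        <pool name="child">
+          <parent>parent</parent>
+          <maxRunningTasks>10</maxRunningTasks>
+        </pool>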

 The pool properties can be set by creating an XML file, similar to
 `conf/fairscheduler.xml.template`, and setting a `spark.scheduler.allocation.file` property in your

@@ -274,4 +279,4 @@ within it for the various settings. For example:

 A full example is also available in `conf/fairscheduler.xml.template`. Note that any pools not
 configured in the XML file will simply get default values for all settings (scheduling mode FIFO,
-weight 1, and minShare 0).
+weight 1, no limit on running tasks, and minShare 0).