
Commit 502ce00

Peter Zijlstra authored and Ingo Molnar committed
sched/fair: Use task_groups instead of leaf_cfs_rq_list to walk all cfs_rqs
In order to allow leaf_cfs_rq_list to remove entries, switch the bandwidth hotplug code over to the task_groups list.

Suggested-by: Tejun Heo <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Chris Mason <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Paul Turner <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent ae4df9d commit 502ce00
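For readers outside the kernel tree, here is a minimal userspace sketch of the iteration pattern the patch adopts: walk a single global list of groups and index each group's per-CPU state, rather than walking a per-CPU leaf list that may omit entries. This is an analogy only, not kernel code; struct group, group_list, NR_CPUS and set_runtime_enabled_for_cpu() below are hypothetical stand-ins for task_groups, tg->cfs_rq[cpu_of(rq)] and update_runtime_enabled(), and the RCU discipline of the real walk is reduced to a comment.

/*
 * Hypothetical userspace analogy (not kernel code): every group keeps
 * per-CPU state; a hotplug-style callback for one CPU walks the single
 * global list of groups and touches only that CPU's slot, mirroring
 * list_for_each_entry_rcu(tg, &task_groups, list) plus
 * tg->cfs_rq[cpu_of(rq)] in the patch below.
 */
#include <stdio.h>

#define NR_CPUS 4

struct group {
        int runtime_enabled[NR_CPUS];   /* stand-in for per-CPU cfs_rq state */
        struct group *next;             /* stand-in for the task_groups list */
};

static struct group *group_list;        /* global list of all groups */

/* Analogue of update_runtime_enabled(): visit every group for one CPU. */
static void set_runtime_enabled_for_cpu(int cpu, int enabled)
{
        struct group *g;

        /* The kernel walk is under rcu_read_lock(); a plain loop here. */
        for (g = group_list; g; g = g->next)
                g->runtime_enabled[cpu] = enabled;
}

int main(void)
{
        struct group a = { {0}, NULL };
        struct group b = { {0}, &a };

        group_list = &b;
        set_runtime_enabled_for_cpu(2, 1);
        printf("%d %d\n", a.runtime_enabled[2], b.runtime_enabled[2]); /* 1 1 */
        return 0;
}

The global walk visits every group unconditionally, which is exactly what allows the per-CPU leaf_cfs_rq_list to drop entries later, as the commit message notes.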

File tree

1 file changed: +25, -5 lines changed


kernel/sched/fair.c

Lines changed: 25 additions & 5 deletions
@@ -4642,24 +4642,43 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
+/*
+ * Both these cpu hotplug callbacks race against unregister_fair_sched_group()
+ *
+ * The race is harmless, since modifying bandwidth settings of unhooked group
+ * bits doesn't do much.
+ */
+
+/* cpu online callback */
 static void __maybe_unused update_runtime_enabled(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq;
+	struct task_group *tg;
 
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
-		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
+	lockdep_assert_held(&rq->lock);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tg, &task_groups, list) {
+		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
+		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
 		raw_spin_lock(&cfs_b->lock);
 		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
 		raw_spin_unlock(&cfs_b->lock);
 	}
+	rcu_read_unlock();
 }
 
+/* cpu offline callback */
 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq;
+	struct task_group *tg;
+
+	lockdep_assert_held(&rq->lock);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tg, &task_groups, list) {
+		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
 		if (!cfs_rq->runtime_enabled)
 			continue;
 
@@ -4677,6 +4696,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 		if (cfs_rq_throttled(cfs_rq))
 			unthrottle_cfs_rq(cfs_rq);
 	}
+	rcu_read_unlock();
 }
 
 #else /* CONFIG_CFS_BANDWIDTH */
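Design note: the new walks pair list_for_each_entry_rcu() over the RCU-managed task_groups list with rcu_read_lock()/rcu_read_unlock(), and the added lockdep_assert_held(&rq->lock) documents the lock the callbacks expect their caller to hold. As the new comment block says, racing with unregister_fair_sched_group() is harmless here, since modifying the bandwidth settings of an already-unhooked group does essentially nothing.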
