
Commit 67e8625

paulturner authored and Ingo Molnar committed
sched: Introduce hierarchical order on shares update list
Avoid duplicate shares update calls by ensuring children always appear
before parents in rq->leaf_cfs_rq_list.

This allows us to do a single in-order traversal for update_shares().

Since we always enqueue in bottom-up order this reduces to two cases:

1) Our parent is already in the list, e.g.

     root
       \
        b
        /\
       c  d* (root->b->c already enqueued)

   Since d's parent is enqueued we push it to the head of the list,
   implicitly ahead of b.

2) Our parent does not appear in the list (or we have no parent).

   In this case we enqueue to the tail of the list; if our parent is
   subsequently enqueued (bottom-up) it will appear to our right by the
   same rule.

Signed-off-by: Paul Turner <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
1 parent e33078b commit 67e8625
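
To make the ordering argument concrete, here is a minimal user-space sketch of the two-case rule. It is not from the patch: the struct node type, the fixed-size array standing in for rq->leaf_cfs_rq_list, and the enqueue() helper are toy stand-ins chosen purely for illustration.

#include <stdio.h>

#define MAX_NODES 8

struct node {
        const char *name;
        struct node *parent;
        int on_list;
};

static struct node *list[MAX_NODES];
static int len;

static void enqueue(struct node *n)
{
        if (n->parent && n->parent->on_list) {
                /* Case 1: parent already listed -> insert at the head. */
                for (int i = len; i > 0; i--)
                        list[i] = list[i - 1];
                list[0] = n;
        } else {
                /* Case 2: no listed parent -> append at the tail. */
                list[len] = n;
        }
        len++;
        n->on_list = 1;
}

int main(void)
{
        struct node root = { "root", NULL,  0 };
        struct node b    = { "b",    &root, 0 };
        struct node c    = { "c",    &b,    0 };
        struct node d    = { "d",    &b,    0 };

        /* Bottom-up, as the scheduler enqueues: first c's chain, then d. */
        enqueue(&c);    /* [c]          */
        enqueue(&b);    /* [c b]        */
        enqueue(&root); /* [c b root]   */
        enqueue(&d);    /* [d c b root] */

        for (int i = 0; i < len; i++)
                printf("%s ", list[i]->name);
        printf("\n");   /* prints: d c b root */
        return 0;
}

Running it prints d c b root: every child precedes its parent, so a single front-to-back walk visits children before the parents that depend on them.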


kernel/sched_fair.c

Lines changed: 16 additions & 10 deletions
@@ -146,8 +146,20 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
         if (!cfs_rq->on_list) {
-                list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
+                /*
+                 * Ensure we either appear before our parent (if already
+                 * enqueued) or force our parent to appear after us when it is
+                 * enqueued.  The fact that we always enqueue bottom-up
+                 * reduces this to two cases.
+                 */
+                if (cfs_rq->tg->parent &&
+                    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
+                        list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
+                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
+                } else {
+                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
                                 &rq_of(cfs_rq)->leaf_cfs_rq_list);
+                }
 
                 cfs_rq->on_list = 1;
         }
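
For reference, the kernel's list_add_rcu() inserts immediately after the list head (i.e. at the front) while list_add_tail_rcu() inserts just before it (i.e. at the back), so the branch taken here directly decides whether this cfs_rq lands ahead of its already-listed parent or in front of a parent that will tail-append later.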
@@ -2016,7 +2028,7 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 /*
  * update tg->load_weight by folding this cpu's load_avg
  */
-static int tg_shares_up(struct task_group *tg, int cpu)
+static int update_shares_cpu(struct task_group *tg, int cpu)
 {
         struct cfs_rq *cfs_rq;
         unsigned long flags;
@@ -2056,14 +2068,8 @@ static void update_shares(int cpu)
         struct rq *rq = cpu_rq(cpu);
 
         rcu_read_lock();
-        for_each_leaf_cfs_rq(rq, cfs_rq) {
-                struct task_group *tg = cfs_rq->tg;
-
-                do {
-                        tg_shares_up(tg, cpu);
-                        tg = tg->parent;
-                } while (tg);
-        }
+        for_each_leaf_cfs_rq(rq, cfs_rq)
+                update_shares_cpu(cfs_rq->tg, cpu);
         rcu_read_unlock();
 }
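
To see the savings, take the changelog's example hierarchy with all four groups on the list in the order d, c, b, root (a hypothetical snapshot, used here only for arithmetic). The removed loop walked from every entry up to the root: 3 calls for d (d, b, root), 3 for c, 2 for b and 1 for root, i.e. 9 tg_shares_up() calls, with root folded four times. The new single traversal makes exactly one update_shares_cpu() call per entry, i.e. 4, and the child-before-parent order means each parent's fold can see its children's freshly updated contributions.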
