
Commit 9e3081c

Peter Zijlstra authored and Ingo Molnar committed
sched: Make tg_shares_up() walk on-demand
Make tg_shares_up() use the active cgroup list; this means we cannot do a strict bottom-up walk of the hierarchy, but assuming it's a very wide tree with a small number of active groups it should be a win.

Signed-off-by: Paul Turner <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 3d4b47b commit 9e3081c

File tree: kernel/sched.c, kernel/sched_fair.c

2 files changed: +58 / -67 lines
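As a rough illustration of why the on-demand walk helps, here is a minimal userspace model (not kernel code; struct group, active_leaves and the visit counters are invented for the example). Instead of visiting every task_group in the tree, the new update_shares() in the diff below starts from each active leaf cfs_rq on the runqueue and climbs parent links to the root, so idle siblings in a very wide hierarchy are never touched; ancestors shared by several active leaves are visited once per leaf, mirroring the per-leaf do/while loop.

/*
 * Illustrative userspace model of the walk change, not kernel code.
 */
#include <stdio.h>

struct group {
        struct group *parent;
        const char *name;
        int visited;            /* how many times the walk touched this group */
};

/* Model of the per-rq leaf list: only groups with runnable tasks. */
static void update_shares_on_demand(struct group **active_leaves, int n)
{
        for (int i = 0; i < n; i++) {
                /* Fold this leaf's load into each ancestor, root included. */
                for (struct group *g = active_leaves[i]; g; g = g->parent)
                        g->visited++;
        }
}

int main(void)
{
        struct group root   = { NULL,  "root",   0 };
        struct group wide1  = { &root, "wide1",  0 };   /* idle */
        struct group wide2  = { &root, "wide2",  0 };   /* idle */
        struct group active = { &root, "active", 0 };   /* has runnable tasks */

        struct group *leaves[] = { &active };
        update_shares_on_demand(leaves, 1);

        /* Only "active" and "root" are visited; the idle siblings are skipped. */
        struct group *all[] = { &root, &wide1, &wide2, &active };
        for (unsigned int i = 0; i < sizeof(all) / sizeof(all[0]); i++)
                printf("%-6s visited %d time(s)\n", all[i]->name, all[i]->visited);
        return 0;
}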

kernel/sched.c

Lines changed: 0 additions & 67 deletions
@@ -279,13 +279,6 @@ static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
-        return list_empty(&root_task_group.children);
-}
-#endif
-
 # define INIT_TASK_GROUP_LOAD NICE_0_LOAD
 
 /*
@@ -1546,48 +1539,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static void update_cfs_load(struct cfs_rq *cfs_rq, int lb);
-static void update_cfs_shares(struct cfs_rq *cfs_rq);
-
-/*
- * update tg->load_weight by folding this cpu's load_avg
- */
-static int tg_shares_up(struct task_group *tg, void *data)
-{
-        long load_avg;
-        struct cfs_rq *cfs_rq;
-        unsigned long flags;
-        int cpu = (long)data;
-        struct rq *rq;
-
-        if (!tg->se[cpu])
-                return 0;
-
-        rq = cpu_rq(cpu);
-        cfs_rq = tg->cfs_rq[cpu];
-
-        raw_spin_lock_irqsave(&rq->lock, flags);
-
-        update_rq_clock(rq);
-        update_cfs_load(cfs_rq, 1);
-
-        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-        load_avg -= cfs_rq->load_contribution;
-
-        atomic_add(load_avg, &tg->load_weight);
-        cfs_rq->load_contribution += load_avg;
-
-        /*
-         * We need to update shares after updating tg->load_weight in
-         * order to adjust the weight of groups with long running tasks.
-         */
-        update_cfs_shares(cfs_rq);
-
-        raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-        return 0;
-}
-
 /*
  * Compute the cpu's hierarchical load factor for each task group.
  * This needs to be done in a top-down fashion because the load of a child
@@ -1611,29 +1562,11 @@ static int tg_load_down(struct task_group *tg, void *data)
         return 0;
 }
 
-static void update_shares(long cpu)
-{
-        if (root_task_group_empty())
-                return;
-
-        /*
-         * XXX: replace with an on-demand list
-         */
-
-        walk_tg_tree(tg_nop, tg_shares_up, (void *)cpu);
-}
-
 static void update_h_load(long cpu)
 {
         walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
-#else
-
-static inline void update_shares(int cpu)
-{
-}
-
 #endif
 
 #ifdef CONFIG_PREEMPT

kernel/sched_fair.c

Lines changed: 58 additions & 0 deletions
@@ -2004,6 +2004,60 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * update tg->load_weight by folding this cpu's load_avg
+ */
+static int tg_shares_up(struct task_group *tg, int cpu)
+{
+        struct cfs_rq *cfs_rq;
+        unsigned long flags;
+        struct rq *rq;
+        long load_avg;
+
+        if (!tg->se[cpu])
+                return 0;
+
+        rq = cpu_rq(cpu);
+        cfs_rq = tg->cfs_rq[cpu];
+
+        raw_spin_lock_irqsave(&rq->lock, flags);
+
+        update_rq_clock(rq);
+        update_cfs_load(cfs_rq, 1);
+
+        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+        load_avg -= cfs_rq->load_contribution;
+        atomic_add(load_avg, &tg->load_weight);
+        cfs_rq->load_contribution += load_avg;
+
+        /*
+         * We need to update shares after updating tg->load_weight in
+         * order to adjust the weight of groups with long running tasks.
+         */
+        update_cfs_shares(cfs_rq);
+
+        raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+        return 0;
+}
+
+static void update_shares(int cpu)
+{
+        struct cfs_rq *cfs_rq;
+        struct rq *rq = cpu_rq(cpu);
+
+        rcu_read_lock();
+        for_each_leaf_cfs_rq(rq, cfs_rq) {
+                struct task_group *tg = cfs_rq->tg;
+
+                do {
+                        tg_shares_up(tg, cpu);
+                        tg = tg->parent;
+                } while (tg);
+        }
+        rcu_read_unlock();
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   unsigned long max_load_move,
@@ -2051,6 +2105,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
         return max_load_move - rem_load_move;
 }
 #else
+static inline void update_shares(int cpu)
+{
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   unsigned long max_load_move,
