
Commit 5f2a45f

Author: Peter Zijlstra (committed)
sched: Allow put_prev_task() to drop rq->lock
Currently the pick_next_task() loop is convoluted and ugly because of
how it can drop the rq->lock and needs to restart the picking. For the
RT/Deadline classes, it is put_prev_task() where we do balancing, and
we could do this before the picking loop. Make this possible.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Valentin Schneider <[email protected]>
Cc: Aaron Lu <[email protected]>
Cc: [email protected]
Cc: Phil Auld <[email protected]>
Cc: Julien Desfossez <[email protected]>
Cc: Nishanth Aravamudan <[email protected]>
Link: https://lkml.kernel.org/r/e4519f6850477ab7f3d257062796e6425ee4ba7c.1559129225.git.vpillai@digitalocean.com
1 parent 5ba553e commit 5f2a45f
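
The point of threading a struct rq_flags * through put_prev_task() is to let a caller perform the RT/Deadline pull balancing up front, before iterating the scheduling classes to pick. This commit only changes the signatures and the RT/DL implementations; a minimal sketch of what such a caller could look like, using only the interfaces visible in this diff (illustrative and hypothetical, not part of this commit):

/*
 * Hypothetical pre-pick caller, for illustration only. Passing rf lets
 * put_prev_task_{rt,dl}() drop and re-take rq->lock to pull tasks
 * before any picking has started.
 */
static struct task_struct *
pick_next_task_slowpath(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	const struct sched_class *class;
	struct task_struct *p;

	/* Balancing may drop rq->lock; no pick has been made yet. */
	prev->sched_class->put_prev_task(rq, prev, rf);

	for_each_class(class) {
		p = class->pick_next_task(rq, NULL, NULL);
		if (p)
			return p;
	}

	BUG(); /* The idle class should always have a runnable task. */
}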

File tree: 7 files changed (+32, -8 lines)


kernel/sched/core.c

Lines changed: 1 addition & 1 deletion
@@ -6090,7 +6090,7 @@ static struct task_struct *__pick_migrate_task(struct rq *rq)
 	for_each_class(class) {
 		next = class->pick_next_task(rq, NULL, NULL);
 		if (next) {
-			next->sched_class->put_prev_task(rq, next);
+			next->sched_class->put_prev_task(rq, next, NULL);
 			return next;
 		}
 	}

kernel/sched/deadline.c

Lines changed: 13 additions & 1 deletion
@@ -1804,13 +1804,25 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	return p;
 }
 
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 {
 	update_curr_dl(rq);
 
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
+
+	if (rf && !on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we've
+		 * not yet started the picking loop.
+		 */
+		rq_unpin_lock(rq, rf);
+		pull_dl_task(rq);
+		rq_repin_lock(rq, rf);
+	}
 }
 
 /*
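
The rf test at the top of the new block makes the pull strictly opt-in: every call site touched by this commit (core.c above and the sched.h wrapper below) passes NULL, so the existing never-drops-the-lock behaviour is unchanged until a caller explicitly provides its rq_flags. For context, the pre-existing helper gating the pull looks roughly like this in kernel/sched/deadline.c of this era (not part of this diff; quoted as an assumption):

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return rq->online && dl_task(prev);
}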

kernel/sched/fair.c

Lines changed: 1 addition & 1 deletion
@@ -6901,7 +6901,7 @@ done: __maybe_unused;
 /*
  * Account for a descheduled task:
  */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct sched_entity *se = &prev->se;
 	struct cfs_rq *cfs_rq;
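
Note that put_prev_task_fair() merely grows the rf parameter without using it: CFS does its balancing inside pick_next_task_fair() itself, so the argument exists only to keep the sched_class method signature uniform. The idle and stop classes below are in the same situation.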

kernel/sched/idle.c

Lines changed: 1 addition & 1 deletion
@@ -374,7 +374,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 	resched_curr(rq);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 }

kernel/sched/rt.c

Lines changed: 13 additions & 1 deletion
@@ -1592,7 +1592,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	return p;
 }
 
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 {
 	update_curr_rt(rq);
 
@@ -1604,6 +1604,18 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
+
+	if (rf && !on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we've
+		 * not yet started the picking loop.
+		 */
+		rq_unpin_lock(rq, rf);
+		pull_rt_task(rq);
+		rq_repin_lock(rq, rf);
+	}
 }
 
 #ifdef CONFIG_SMP
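
The RT block mirrors the Deadline one exactly, including the comment explaining why dropping rq->lock here is safe. For context, the pre-existing gate is roughly the following in kernel/sched/rt.c of this era (not part of this diff; quoted as an assumption):

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->rt.highest_prio.curr > prev->prio;
}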

kernel/sched/sched.h

Lines changed: 2 additions & 2 deletions
@@ -1710,7 +1710,7 @@ struct sched_class {
 	struct task_struct * (*pick_next_task)(struct rq *rq,
 					       struct task_struct *prev,
 					       struct rq_flags *rf);
-	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf);
 	void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
@@ -1756,7 +1756,7 @@ struct sched_class {
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
 	WARN_ON_ONCE(rq->curr != prev);
-	prev->sched_class->put_prev_task(rq, prev, NULL);
+	prev->sched_class->put_prev_task(rq, prev, NULL);
 }
 
 static inline void set_next_task(struct rq *rq, struct task_struct *next)
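
Because the inline put_prev_task() wrapper hard-codes NULL, every existing user keeps the guarantee that rq->lock is never dropped. A caller that does want the balancing would invoke the class method directly with its rq_flags; one possible shape (hypothetical helper, not in this commit):

static inline void
put_prev_task_rq_flags(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	WARN_ON_ONCE(rq->curr != prev);
	/* With a non-NULL rf, the RT/DL methods may drop and re-take rq->lock. */
	prev->sched_class->put_prev_task(rq, prev, rf);
}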

kernel/sched/stop_task.c

Lines changed: 1 addition & 1 deletion
@@ -59,7 +59,7 @@ static void yield_task_stop(struct rq *rq)
 	BUG(); /* the stop task should never yield, its pointless. */
 }
 
-static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *curr = rq->curr;
 	u64 delta_exec;
