Commit f95d4ea

Peter Zijlstra authored and committed
sched/{rt,deadline}: Fix set_next_task vs pick_next_task

Because pick_next_task() implies set_curr_task() and some of the details
haven't mattered too much, some of what _should_ be in set_curr_task()
ended up in pick_next_task(); correct this.

This prepares the way for a pick_next_task() variant that does not affect
the current state, allowing remote picking.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Aaron Lu <[email protected]>
Cc: Valentin Schneider <[email protected]>
Cc: [email protected]
Cc: Phil Auld <[email protected]>
Cc: Julien Desfossez <[email protected]>
Cc: Nishanth Aravamudan <[email protected]>
Link: https://lkml.kernel.org/r/38c61d5240553e043c27c5e00b9dd0d184dd6081.1559129225.git.vpillai@digitalocean.com
1 parent 5feeb78 commit f95d4ea
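
For readers skimming the diffs below, the following is a minimal, self-contained C sketch of the pattern this commit establishes: everything that must happen when a task becomes rq->curr is gathered into a single per-class set_next_task_*() helper, which the pick path and set_curr_task_*() then share. All types and names in the sketch (the simplified structs, become_current_bookkeeping(), the *_sketch() functions) are illustrative stand-ins, not the kernel's real definitions.

/* Simplified stand-ins for the kernel's types. */
struct task_struct { int prio; };
struct rq { struct task_struct *curr; };

/*
 * Stand-in for the per-class bookkeeping the real helpers perform
 * (dequeue from the pushable list, hrtick, PELT update, push queueing).
 */
static void become_current_bookkeeping(struct rq *rq, struct task_struct *p)
{
        (void)rq;
        (void)p;
}

/* One place that knows what "p becomes rq->curr" entails. */
static void set_next_task_sketch(struct rq *rq, struct task_struct *p)
{
        become_current_bookkeeping(rq, p);
}

/* The pick no longer carries its own copy of that bookkeeping ... */
static struct task_struct *pick_next_task_sketch(struct rq *rq)
{
        struct task_struct *p = rq->curr;       /* placeholder selection */

        set_next_task_sketch(rq, p);
        return p;
}

/* ... and set_curr_task() goes through exactly the same path. */
static void set_curr_task_sketch(struct rq *rq)
{
        set_next_task_sketch(rq, rq->curr);
}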

File tree

2 files changed: 24 additions, 24 deletions


kernel/sched/deadline.c

Lines changed: 11 additions & 11 deletions

@@ -1727,12 +1727,20 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
 }
 #endif
 
-static inline void set_next_task(struct rq *rq, struct task_struct *p)
+static void set_next_task_dl(struct rq *rq, struct task_struct *p)
 {
         p->se.exec_start = rq_clock_task(rq);
 
         /* You can't push away the running task */
         dequeue_pushable_dl_task(rq, p);
+
+        if (hrtick_enabled(rq))
+                start_hrtick_dl(rq, p);
+
+        if (rq->curr->sched_class != &dl_sched_class)
+                update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+
+        deadline_queue_push_tasks(rq);
 }
 
 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
@@ -1791,15 +1799,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
         p = dl_task_of(dl_se);
 
-        set_next_task(rq, p);
-
-        if (hrtick_enabled(rq))
-                start_hrtick_dl(rq, p);
-
-        deadline_queue_push_tasks(rq);
-
-        if (rq->curr->sched_class != &dl_sched_class)
-                update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+        set_next_task_dl(rq, p);
 
         return p;
 }
@@ -1846,7 +1846,7 @@ static void task_fork_dl(struct task_struct *p)
 
 static void set_curr_task_dl(struct rq *rq)
 {
-        set_next_task(rq, rq->curr);
+        set_next_task_dl(rq, rq->curr);
 }
 
 #ifdef CONFIG_SMP

kernel/sched/rt.c

Lines changed: 13 additions & 13 deletions

@@ -1498,12 +1498,22 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
 #endif
 }
 
-static inline void set_next_task(struct rq *rq, struct task_struct *p)
+static inline void set_next_task_rt(struct rq *rq, struct task_struct *p)
 {
         p->se.exec_start = rq_clock_task(rq);
 
         /* The running task is never eligible for pushing */
         dequeue_pushable_task(rq, p);
+
+        /*
+         * If prev task was rt, put_prev_task() has already updated the
+         * utilization. We only care of the case where we start to schedule a
+         * rt task
+         */
+        if (rq->curr->sched_class != &rt_sched_class)
+                update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+
+        rt_queue_push_tasks(rq);
 }
 
 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
@@ -1577,17 +1587,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
         p = _pick_next_task_rt(rq);
 
-        set_next_task(rq, p);
-
-        rt_queue_push_tasks(rq);
-
-        /*
-         * If prev task was rt, put_prev_task() has already updated the
-         * utilization. We only care of the case where we start to schedule a
-         * rt task
-         */
-        if (rq->curr->sched_class != &rt_sched_class)
-                update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+        set_next_task_rt(rq, p);
 
         return p;
 }
@@ -2356,7 +2356,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 
 static void set_curr_task_rt(struct rq *rq)
 {
-        set_next_task(rq, rq->curr);
+        set_next_task_rt(rq, rq->curr);
 }
 
 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
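
The commit message notes that this split prepares for a pick_next_task() variant that does not affect the current state, so that another CPU can pick remotely. That variant is not part of this commit; the sketch below only illustrates the stated direction, and every name in it (pick_task_sketch(), set_next_task_sketch(), the simplified types) is hypothetical.

/* Simplified stand-in types, as in the earlier sketch. */
struct task_struct { int prio; };
struct rq { struct task_struct *curr; };

/* Consolidated becoming-current work (see the diffs above). */
static void set_next_task_sketch(struct rq *rq, struct task_struct *p)
{
        (void)rq;
        (void)p;
}

/*
 * A pure pick: reports which task the class would run next but touches
 * no runqueue state, which is what would make a remote pick safe.
 */
static struct task_struct *pick_task_sketch(struct rq *rq)
{
        return rq->curr;        /* placeholder for the real selection logic */
}

/* The local pick is then the pure pick plus the consolidated work. */
static struct task_struct *pick_next_task_sketch(struct rq *rq)
{
        struct task_struct *p = pick_task_sketch(rq);

        set_next_task_sketch(rq, p);
        return p;
}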
