Skip to content

Commit 03b7fad

Browse files
Author: Peter Zijlstra (committed)

sched: Add task_struct pointer to sched_class::set_curr_task

In preparation of further separating pick_next_task() and
set_curr_task() we have to pass the actual task into it, while there,
rename the thing to better pair with put_prev_task().

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Aaron Lu <[email protected]>
Cc: Valentin Schneider <[email protected]>
Cc: [email protected]
Cc: Phil Auld <[email protected]>
Cc: Julien Desfossez <[email protected]>
Cc: Nishanth Aravamudan <[email protected]>
Link: https://lkml.kernel.org/r/a96d1bcdd716db4a4c5da2fece647a1456c0ed78.1559129225.git.vpillai@digitalocean.com
1 parent 10e7071 commit 03b7fad

File tree

7 files changed

+48
-46
lines changed

7 files changed

+48
-46
lines changed

kernel/sched/core.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1494,7 +1494,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
14941494
if (queued)
14951495
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
14961496
if (running)
1497-
set_curr_task(rq, p);
1497+
set_next_task(rq, p);
14981498
}
14991499

15001500
/*
@@ -4325,7 +4325,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
43254325
if (queued)
43264326
enqueue_task(rq, p, queue_flag);
43274327
if (running)
4328-
set_curr_task(rq, p);
4328+
set_next_task(rq, p);
43294329

43304330
check_class_changed(rq, p, prev_class, oldprio);
43314331
out_unlock:
@@ -4392,7 +4392,7 @@ void set_user_nice(struct task_struct *p, long nice)
43924392
resched_curr(rq);
43934393
}
43944394
if (running)
4395-
set_curr_task(rq, p);
4395+
set_next_task(rq, p);
43964396
out_unlock:
43974397
task_rq_unlock(rq, p, &rf);
43984398
}
@@ -4840,7 +4840,7 @@ static int __sched_setscheduler(struct task_struct *p,
48404840
enqueue_task(rq, p, queue_flags);
48414841
}
48424842
if (running)
4843-
set_curr_task(rq, p);
4843+
set_next_task(rq, p);
48444844

48454845
check_class_changed(rq, p, prev_class, oldprio);
48464846

@@ -6042,7 +6042,7 @@ void sched_setnuma(struct task_struct *p, int nid)
60426042
if (queued)
60436043
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
60446044
if (running)
6045-
set_curr_task(rq, p);
6045+
set_next_task(rq, p);
60466046
task_rq_unlock(rq, p, &rf);
60476047
}
60486048
#endif /* CONFIG_NUMA_BALANCING */
@@ -6919,7 +6919,7 @@ void sched_move_task(struct task_struct *tsk)
69196919
if (queued)
69206920
enqueue_task(rq, tsk, queue_flags);
69216921
if (running)
6922-
set_curr_task(rq, tsk);
6922+
set_next_task(rq, tsk);
69236923

69246924
task_rq_unlock(rq, tsk, &rf);
69256925
}

kernel/sched/deadline.c

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1844,11 +1844,6 @@ static void task_fork_dl(struct task_struct *p)
18441844
*/
18451845
}
18461846

1847-
static void set_curr_task_dl(struct rq *rq)
1848-
{
1849-
set_next_task_dl(rq, rq->curr);
1850-
}
1851-
18521847
#ifdef CONFIG_SMP
18531848

18541849
/* Only try algorithms three times */
@@ -2466,6 +2461,7 @@ const struct sched_class dl_sched_class = {
24662461

24672462
.pick_next_task = pick_next_task_dl,
24682463
.put_prev_task = put_prev_task_dl,
2464+
.set_next_task = set_next_task_dl,
24692465

24702466
#ifdef CONFIG_SMP
24712467
.select_task_rq = select_task_rq_dl,
@@ -2476,7 +2472,6 @@ const struct sched_class dl_sched_class = {
24762472
.task_woken = task_woken_dl,
24772473
#endif
24782474

2479-
.set_curr_task = set_curr_task_dl,
24802475
.task_tick = task_tick_dl,
24812476
.task_fork = task_fork_dl,
24822477

kernel/sched/fair.c

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10150,9 +10150,19 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
1015010150
* This routine is mostly called to set cfs_rq->curr field when a task
1015110151
* migrates between groups/classes.
1015210152
*/
10153-
static void set_curr_task_fair(struct rq *rq)
10153+
static void set_next_task_fair(struct rq *rq, struct task_struct *p)
1015410154
{
10155-
struct sched_entity *se = &rq->curr->se;
10155+
struct sched_entity *se = &p->se;
10156+
10157+
#ifdef CONFIG_SMP
10158+
if (task_on_rq_queued(p)) {
10159+
/*
10160+
* Move the next running task to the front of the list, so our
10161+
* cfs_tasks list becomes MRU one.
10162+
*/
10163+
list_move(&se->group_node, &rq->cfs_tasks);
10164+
}
10165+
#endif
1015610166

1015710167
for_each_sched_entity(se) {
1015810168
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -10423,7 +10433,9 @@ const struct sched_class fair_sched_class = {
1042310433
.check_preempt_curr = check_preempt_wakeup,
1042410434

1042510435
.pick_next_task = pick_next_task_fair,
10436+
1042610437
.put_prev_task = put_prev_task_fair,
10438+
.set_next_task = set_next_task_fair,
1042710439

1042810440
#ifdef CONFIG_SMP
1042910441
.select_task_rq = select_task_rq_fair,
@@ -10436,7 +10448,6 @@ const struct sched_class fair_sched_class = {
1043610448
.set_cpus_allowed = set_cpus_allowed_common,
1043710449
#endif
1043810450

10439-
.set_curr_task = set_curr_task_fair,
1044010451
.task_tick = task_tick_fair,
1044110452
.task_fork = task_fork_fair,
1044210453

kernel/sched/idle.c

Lines changed: 15 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -374,14 +374,25 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
374374
resched_curr(rq);
375375
}
376376

377+
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
378+
{
379+
}
380+
381+
static void set_next_task_idle(struct rq *rq, struct task_struct *next)
382+
{
383+
update_idle_core(rq);
384+
schedstat_inc(rq->sched_goidle);
385+
}
386+
377387
static struct task_struct *
378388
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
379389
{
390+
struct task_struct *next = rq->idle;
391+
380392
put_prev_task(rq, prev);
381-
update_idle_core(rq);
382-
schedstat_inc(rq->sched_goidle);
393+
set_next_task_idle(rq, next);
383394

384-
return rq->idle;
395+
return next;
385396
}
386397

387398
/*
@@ -397,10 +408,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
397408
raw_spin_lock_irq(&rq->lock);
398409
}
399410

400-
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
401-
{
402-
}
403-
404411
/*
405412
* scheduler tick hitting a task of our scheduling class.
406413
*
@@ -413,10 +420,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
413420
{
414421
}
415422

416-
static void set_curr_task_idle(struct rq *rq)
417-
{
418-
}
419-
420423
static void switched_to_idle(struct rq *rq, struct task_struct *p)
421424
{
422425
BUG();
@@ -451,13 +454,13 @@ const struct sched_class idle_sched_class = {
451454

452455
.pick_next_task = pick_next_task_idle,
453456
.put_prev_task = put_prev_task_idle,
457+
.set_next_task = set_next_task_idle,
454458

455459
#ifdef CONFIG_SMP
456460
.select_task_rq = select_task_rq_idle,
457461
.set_cpus_allowed = set_cpus_allowed_common,
458462
#endif
459463

460-
.set_curr_task = set_curr_task_idle,
461464
.task_tick = task_tick_idle,
462465

463466
.get_rr_interval = get_rr_interval_idle,

kernel/sched/rt.c

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2354,11 +2354,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
23542354
}
23552355
}
23562356

2357-
static void set_curr_task_rt(struct rq *rq)
2358-
{
2359-
set_next_task_rt(rq, rq->curr);
2360-
}
2361-
23622357
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
23632358
{
23642359
/*
@@ -2380,6 +2375,7 @@ const struct sched_class rt_sched_class = {
23802375

23812376
.pick_next_task = pick_next_task_rt,
23822377
.put_prev_task = put_prev_task_rt,
2378+
.set_next_task = set_next_task_rt,
23832379

23842380
#ifdef CONFIG_SMP
23852381
.select_task_rq = select_task_rq_rt,
@@ -2391,7 +2387,6 @@ const struct sched_class rt_sched_class = {
23912387
.switched_from = switched_from_rt,
23922388
#endif
23932389

2394-
.set_curr_task = set_curr_task_rt,
23952390
.task_tick = task_tick_rt,
23962391

23972392
.get_rr_interval = get_rr_interval_rt,

kernel/sched/sched.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1707,6 +1707,7 @@ struct sched_class {
17071707
struct task_struct *prev,
17081708
struct rq_flags *rf);
17091709
void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1710+
void (*set_next_task)(struct rq *rq, struct task_struct *p);
17101711

17111712
#ifdef CONFIG_SMP
17121713
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
@@ -1721,7 +1722,6 @@ struct sched_class {
17211722
void (*rq_offline)(struct rq *rq);
17221723
#endif
17231724

1724-
void (*set_curr_task)(struct rq *rq);
17251725
void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
17261726
void (*task_fork)(struct task_struct *p);
17271727
void (*task_dead)(struct task_struct *p);
@@ -1755,9 +1755,10 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
17551755
prev->sched_class->put_prev_task(rq, prev);
17561756
}
17571757

1758-
static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
1758+
static inline void set_next_task(struct rq *rq, struct task_struct *next)
17591759
{
1760-
curr->sched_class->set_curr_task(rq);
1760+
WARN_ON_ONCE(rq->curr != next);
1761+
next->sched_class->set_next_task(rq, next);
17611762
}
17621763

17631764
#ifdef CONFIG_SMP

kernel/sched/stop_task.c

Lines changed: 7 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,11 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
2323
/* we're never preempted */
2424
}
2525

26+
static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
27+
{
28+
stop->se.exec_start = rq_clock_task(rq);
29+
}
30+
2631
static struct task_struct *
2732
pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
2833
{
@@ -32,8 +37,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
3237
return NULL;
3338

3439
put_prev_task(rq, prev);
35-
36-
stop->se.exec_start = rq_clock_task(rq);
40+
set_next_task_stop(rq, stop);
3741

3842
return stop;
3943
}
@@ -86,13 +90,6 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
8690
{
8791
}
8892

89-
static void set_curr_task_stop(struct rq *rq)
90-
{
91-
struct task_struct *stop = rq->stop;
92-
93-
stop->se.exec_start = rq_clock_task(rq);
94-
}
95-
9693
static void switched_to_stop(struct rq *rq, struct task_struct *p)
9794
{
9895
BUG(); /* its impossible to change to this class */
@@ -128,13 +125,13 @@ const struct sched_class stop_sched_class = {
128125

129126
.pick_next_task = pick_next_task_stop,
130127
.put_prev_task = put_prev_task_stop,
128+
.set_next_task = set_next_task_stop,
131129

132130
#ifdef CONFIG_SMP
133131
.select_task_rq = select_task_rq_stop,
134132
.set_cpus_allowed = set_cpus_allowed_common,
135133
#endif
136134

137-
.set_curr_task = set_curr_task_stop,
138135
.task_tick = task_tick_stop,
139136

140137
.get_rr_interval = get_rr_interval_stop,

0 commit comments

Comments (0)