Commit 713a2e2

Waiman Long authored and Peter Zijlstra committed
sched: Introduce affinity_context
In order to prepare for passing through additional data through the affinity call-chains, convert the mask and flags argument into a structure.

Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Waiman Long <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
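The change is mechanical: each (mask, flags) argument pair in the affinity call-chain becomes a single struct affinity_context pointer that the caller builds on the stack. As a before/after sketch of the calling convention (condensed from the migrate_enable() hunk below, not a complete function):

	/* Before: mask and flags travel as two separate arguments. */
	__set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);

	/* After: the caller packs both into an affinity_context and passes that down. */
	struct affinity_context ac = {
		.new_mask = &p->cpus_mask,
		.flags = SCA_MIGRATE_ENABLE,
	};
	__set_cpus_allowed_ptr(p, &ac);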
1 parent 5584e8a commit 713a2e2

File tree

3 files changed: +85 −47 lines changed

kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/sched.h

kernel/sched/core.c

Lines changed: 75 additions & 39 deletions
@@ -2189,14 +2189,18 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 #ifdef CONFIG_SMP
 
 static void
-__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
 
 static int __set_cpus_allowed_ptr(struct task_struct *p,
-				  const struct cpumask *new_mask,
-				  u32 flags);
+				  struct affinity_context *ctx);
 
 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
 {
+	struct affinity_context ac = {
+		.new_mask = cpumask_of(rq->cpu),
+		.flags = SCA_MIGRATE_DISABLE,
+	};
+
 	if (likely(!p->migration_disabled))
 		return;
 
@@ -2206,7 +2210,7 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
 	/*
 	 * Violates locking rules! see comment in __do_set_cpus_allowed().
 	 */
-	__do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE);
+	__do_set_cpus_allowed(p, &ac);
 }
 
 void migrate_disable(void)
@@ -2228,6 +2232,10 @@ EXPORT_SYMBOL_GPL(migrate_disable);
 void migrate_enable(void)
 {
 	struct task_struct *p = current;
+	struct affinity_context ac = {
+		.new_mask = &p->cpus_mask,
+		.flags = SCA_MIGRATE_ENABLE,
+	};
 
 	if (p->migration_disabled > 1) {
 		p->migration_disabled--;
@@ -2243,7 +2251,7 @@ void migrate_enable(void)
 	 */
 	preempt_disable();
 	if (p->cpus_ptr != &p->cpus_mask)
-		__set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
+		__set_cpus_allowed_ptr(p, &ac);
 	/*
 	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
 	 * regular cpus_mask, otherwise things that race (eg.
@@ -2523,19 +2531,19 @@ int push_cpu_stop(void *arg)
  * sched_class::set_cpus_allowed must do the below, but is not required to
  * actually call this function.
  */
-void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
+void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
 {
-	if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
-		p->cpus_ptr = new_mask;
+	if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
+		p->cpus_ptr = ctx->new_mask;
 		return;
 	}
 
-	cpumask_copy(&p->cpus_mask, new_mask);
-	p->nr_cpus_allowed = cpumask_weight(new_mask);
+	cpumask_copy(&p->cpus_mask, ctx->new_mask);
+	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
 }
 
 static void
-__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
+__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
 {
 	struct rq *rq = task_rq(p);
 	bool queued, running;
@@ -2552,7 +2560,7 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32
 	 *
 	 * XXX do further audits, this smells like something putrid.
 	 */
-	if (flags & SCA_MIGRATE_DISABLE)
+	if (ctx->flags & SCA_MIGRATE_DISABLE)
 		SCHED_WARN_ON(!p->on_cpu);
 	else
 		lockdep_assert_held(&p->pi_lock);
@@ -2571,7 +2579,7 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32
 	if (running)
 		put_prev_task(rq, p);
 
-	p->sched_class->set_cpus_allowed(p, new_mask, flags);
+	p->sched_class->set_cpus_allowed(p, ctx);
 
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
@@ -2581,7 +2589,12 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32
 
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
-	__do_set_cpus_allowed(p, new_mask, 0);
+	struct affinity_context ac = {
+		.new_mask = new_mask,
+		.flags = 0,
+	};
+
+	__do_set_cpus_allowed(p, &ac);
 }
 
 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
@@ -2834,8 +2847,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
  * Called with both p->pi_lock and rq->lock held; drops both before returning.
  */
 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
-					 const struct cpumask *new_mask,
-					 u32 flags,
+					 struct affinity_context *ctx,
 					 struct rq *rq,
 					 struct rq_flags *rf)
 	__releases(rq->lock)
@@ -2864,7 +2876,7 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 		cpu_valid_mask = cpu_online_mask;
 	}
 
-	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
+	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -2873,18 +2885,18 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 	 * Must re-check here, to close a race against __kthread_bind(),
 	 * sched_setaffinity() is not guaranteed to observe the flag.
 	 */
-	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
+	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (!(flags & SCA_MIGRATE_ENABLE)) {
-		if (cpumask_equal(&p->cpus_mask, new_mask))
+	if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
+		if (cpumask_equal(&p->cpus_mask, ctx->new_mask))
 			goto out;
 
 		if (WARN_ON_ONCE(p == current &&
 				 is_migration_disabled(p) &&
-				 !cpumask_test_cpu(task_cpu(p), new_mask))) {
+				 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
 			ret = -EBUSY;
 			goto out;
 		}
@@ -2895,18 +2907,18 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 	 * for groups of tasks (ie. cpuset), so that load balancing is not
 	 * immediately required to distribute the tasks within their new mask.
 	 */
-	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
+	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
 	if (dest_cpu >= nr_cpu_ids) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	__do_set_cpus_allowed(p, new_mask, flags);
+	__do_set_cpus_allowed(p, ctx);
 
-	if (flags & SCA_USER)
+	if (ctx->flags & SCA_USER)
 		user_mask = clear_user_cpus_ptr(p);
 
-	ret = affine_move_task(rq, p, rf, dest_cpu, flags);
+	ret = affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
 
 	kfree(user_mask);
 
@@ -2928,18 +2940,23 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 * call is not atomic; no spinlocks may be held.
 */
 static int __set_cpus_allowed_ptr(struct task_struct *p,
-				  const struct cpumask *new_mask, u32 flags)
+				  struct affinity_context *ctx)
 {
 	struct rq_flags rf;
 	struct rq *rq;
 
 	rq = task_rq_lock(p, &rf);
-	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf);
+	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
 }
 
 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 {
-	return __set_cpus_allowed_ptr(p, new_mask, 0);
+	struct affinity_context ac = {
+		.new_mask = new_mask,
+		.flags = 0,
+	};
+
+	return __set_cpus_allowed_ptr(p, &ac);
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
@@ -2955,6 +2972,7 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
 					 const struct cpumask *subset_mask)
 {
 	struct cpumask *user_mask = NULL;
+	struct affinity_context ac;
 	struct rq_flags rf;
 	struct rq *rq;
 	int err;
@@ -2991,7 +3009,11 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
 		p->user_cpus_ptr = user_mask;
 	}
 
-	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);
+	ac = (struct affinity_context){
+		.new_mask = new_mask,
+	};
+
+	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
 
 err_unlock:
 	task_rq_unlock(rq, p, &rf);
@@ -3045,7 +3067,7 @@ void force_compatible_cpus_allowed_ptr(struct task_struct *p)
 }
 
 static int
-__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
+__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
 
 /*
 * Restore the affinity of a task @p which was previously restricted by a
@@ -3058,14 +3080,17 @@ __sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
 {
 	struct cpumask *user_mask = p->user_cpus_ptr;
+	struct affinity_context ac = {
+		.new_mask = user_mask,
+	};
 	unsigned long flags;
 
 	/*
 	 * Try to restore the old affinity mask. If this fails, then
 	 * we free the mask explicitly to avoid it being inherited across
 	 * a subsequent fork().
 	 */
-	if (!user_mask || !__sched_setaffinity(p, user_mask))
+	if (!user_mask || !__sched_setaffinity(p, &ac))
 		return;
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -3550,10 +3575,9 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
 #else /* CONFIG_SMP */
 
 static inline int __set_cpus_allowed_ptr(struct task_struct *p,
-					 const struct cpumask *new_mask,
-					 u32 flags)
+					 struct affinity_context *ctx)
 {
-	return set_cpus_allowed_ptr(p, new_mask);
+	return set_cpus_allowed_ptr(p, ctx->new_mask);
 }
 
 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
@@ -8090,7 +8114,7 @@ int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
 #endif
 
 static int
-__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
+__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 {
 	int retval;
 	cpumask_var_t cpus_allowed, new_mask;
@@ -8104,13 +8128,16 @@ __sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 	}
 
 	cpuset_cpus_allowed(p, cpus_allowed);
-	cpumask_and(new_mask, mask, cpus_allowed);
+	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
+
+	ctx->new_mask = new_mask;
+	ctx->flags |= SCA_CHECK;
 
 	retval = dl_task_check_affinity(p, new_mask);
 	if (retval)
 		goto out_free_new_mask;
 again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
+	retval = __set_cpus_allowed_ptr(p, ctx);
 	if (retval)
 		goto out_free_new_mask;
 
@@ -8133,6 +8160,9 @@ __sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 
 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 {
+	struct affinity_context ac = {
+		.new_mask = in_mask,
+	};
 	struct task_struct *p;
 	int retval;
 
@@ -8167,7 +8197,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	if (retval)
 		goto out_put_task;
 
-	retval = __sched_setaffinity(p, in_mask);
+	retval = __sched_setaffinity(p, &ac);
 out_put_task:
 	put_task_struct(p);
 	return retval;
@@ -8948,6 +8978,12 @@ void show_state_filter(unsigned int state_filter)
 */
 void __init init_idle(struct task_struct *idle, int cpu)
 {
+#ifdef CONFIG_SMP
+	struct affinity_context ac = (struct affinity_context) {
+		.new_mask = cpumask_of(cpu),
+		.flags = 0,
+	};
+#endif
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
@@ -8972,7 +9008,7 @@ void __init init_idle(struct task_struct *idle, int cpu)
 	 *
 	 * And since this is boot we can forgo the serialization.
 	 */
-	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
+	set_cpus_allowed_common(idle, &ac);
 #endif
 	/*
 	 * We're having a chicken and egg problem, even though we are

kernel/sched/deadline.c

Lines changed: 3 additions & 4 deletions
@@ -2485,8 +2485,7 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 }
 
 static void set_cpus_allowed_dl(struct task_struct *p,
-				const struct cpumask *new_mask,
-				u32 flags)
+				struct affinity_context *ctx)
 {
 	struct root_domain *src_rd;
 	struct rq *rq;
@@ -2501,7 +2500,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 	 * update. We already made space for us in the destination
 	 * domain (see cpuset_can_attach()).
 	 */
-	if (!cpumask_intersects(src_rd->span, new_mask)) {
+	if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
 		struct dl_bw *src_dl_b;
 
 		src_dl_b = dl_bw_of(cpu_of(rq));
@@ -2515,7 +2514,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	set_cpus_allowed_common(p, new_mask, flags);
+	set_cpus_allowed_common(p, ctx);
 }
 
 /* Assumes rq->lock is held */

kernel/sched/sched.h

Lines changed: 7 additions & 4 deletions
@@ -2145,6 +2145,11 @@ extern const u32 sched_prio_to_wmult[40];
 
 #define RETRY_TASK ((void *)-1UL)
 
+struct affinity_context {
+	const struct cpumask *new_mask;
+	unsigned int flags;
+};
+
 struct sched_class {
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -2173,9 +2178,7 @@ struct sched_class {
 
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
-	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask,
-				 u32 flags);
+	void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -2286,7 +2289,7 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
 
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
 
 static inline struct task_struct *get_push_task(struct rq *rq)
 {

Comments (0)