
Commit 8f9ea86

Waiman Long authored and Peter Zijlstra committed
sched: Always preserve the user requested cpumask
Unconditionally preserve the user requested cpumask on sched_setaffinity()
calls. This allows using it outside of the fairly narrow
restrict_cpus_allowed_ptr() use-case and fixes some cpuset issues that
currently suffer destruction of cpumasks.

Signed-off-by: Waiman Long <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 713a2e2 commit 8f9ea86
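For context, a minimal user-space sketch of the interface this change is about (illustration only, not part of the diff; it assumes the standard glibc sched_setaffinity()/sched_getaffinity() wrappers). The requested mask passed in below is what the kernel now retains in p->user_cpus_ptr, even if cpusets or CPU hotplug later narrow the task's effective affinity:

/*
 * Request CPUs 0-1 for the current task, then read back the effective mask.
 * The request may be narrowed (or rejected) depending on available CPUs and
 * cpuset constraints; with this commit the kernel keeps the requested mask.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t req, eff;

	CPU_ZERO(&req);
	CPU_SET(0, &req);
	CPU_SET(1, &req);

	if (sched_setaffinity(0, sizeof(req), &req))
		perror("sched_setaffinity");

	if (sched_getaffinity(0, sizeof(eff), &eff) == 0)
		printf("effective CPU count: %d\n", CPU_COUNT(&eff));

	return 0;
}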

File tree: 2 files changed, +72 -55 lines changed

kernel/sched/core.c
kernel/sched/sched.h

kernel/sched/core.c

Lines changed: 64 additions & 55 deletions
@@ -2540,6 +2540,12 @@ void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
 
 	cpumask_copy(&p->cpus_mask, ctx->new_mask);
 	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
+
+	/*
+	 * Swap in a new user_cpus_ptr if SCA_USER flag set
+	 */
+	if (ctx->flags & SCA_USER)
+		swap(p->user_cpus_ptr, ctx->user_mask);
 }
 
 static void
@@ -2600,14 +2606,19 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
 		      int node)
 {
+	unsigned long flags;
+
 	if (!src->user_cpus_ptr)
 		return 0;
 
 	dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
 	if (!dst->user_cpus_ptr)
 		return -ENOMEM;
 
+	/* Use pi_lock to protect content of user_cpus_ptr */
+	raw_spin_lock_irqsave(&src->pi_lock, flags);
 	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
 	return 0;
 }
 
@@ -2856,7 +2867,6 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	bool kthread = p->flags & PF_KTHREAD;
-	struct cpumask *user_mask = NULL;
 	unsigned int dest_cpu;
 	int ret = 0;
 
@@ -2915,14 +2925,7 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 
 	__do_set_cpus_allowed(p, ctx);
 
-	if (ctx->flags & SCA_USER)
-		user_mask = clear_user_cpus_ptr(p);
-
-	ret = affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
-
-	kfree(user_mask);
-
-	return ret;
+	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
 
 out:
 	task_rq_unlock(rq, p, rf);
@@ -2962,27 +2965,25 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 /*
  * Change a given task's CPU affinity to the intersection of its current
- * affinity mask and @subset_mask, writing the resulting mask to @new_mask
- * and pointing @p->user_cpus_ptr to a copy of the old mask.
+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
+ * If user_cpus_ptr is defined, use it as the basis for restricting CPU
+ * affinity or use cpu_online_mask instead.
+ *
  * If the resulting mask is empty, leave the affinity unchanged and return
  * -EINVAL.
  */
 static int restrict_cpus_allowed_ptr(struct task_struct *p,
 				     struct cpumask *new_mask,
 				     const struct cpumask *subset_mask)
 {
-	struct cpumask *user_mask = NULL;
-	struct affinity_context ac;
+	struct affinity_context ac = {
+		.new_mask = new_mask,
+		.flags = 0,
+	};
 	struct rq_flags rf;
 	struct rq *rq;
 	int err;
 
-	if (!p->user_cpus_ptr) {
-		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
-		if (!user_mask)
-			return -ENOMEM;
-	}
-
 	rq = task_rq_lock(p, &rf);
 
 	/*
@@ -2995,29 +2996,15 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
 		goto err_unlock;
 	}
 
-	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
+	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
 		err = -EINVAL;
 		goto err_unlock;
 	}
 
-	/*
-	 * We're about to butcher the task affinity, so keep track of what
-	 * the user asked for in case we're able to restore it later on.
-	 */
-	if (user_mask) {
-		cpumask_copy(user_mask, p->cpus_ptr);
-		p->user_cpus_ptr = user_mask;
-	}
-
-	ac = (struct affinity_context){
-		.new_mask = new_mask,
-	};
-
 	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
 
 err_unlock:
 	task_rq_unlock(rq, p, &rf);
-	kfree(user_mask);
 	return err;
 }
 
@@ -3071,33 +3058,25 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
 
 /*
  * Restore the affinity of a task @p which was previously restricted by a
- * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
- * @p->user_cpus_ptr.
+ * call to force_compatible_cpus_allowed_ptr().
  *
  * It is the caller's responsibility to serialise this with any calls to
  * force_compatible_cpus_allowed_ptr(@p).
  */
 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
 {
-	struct cpumask *user_mask = p->user_cpus_ptr;
 	struct affinity_context ac = {
-		.new_mask = user_mask,
+		.new_mask = task_user_cpus(p),
+		.flags = 0,
 	};
-	unsigned long flags;
+	int ret;
 
 	/*
-	 * Try to restore the old affinity mask. If this fails, then
-	 * we free the mask explicitly to avoid it being inherited across
-	 * a subsequent fork().
+	 * Try to restore the old affinity mask with __sched_setaffinity().
+	 * Cpuset masking will be done there too.
 	 */
-	if (!user_mask || !__sched_setaffinity(p, &ac))
-		return;
-
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	user_mask = clear_user_cpus_ptr(p);
-	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
-	kfree(user_mask);
+	ret = __sched_setaffinity(p, &ac);
+	WARN_ON_ONCE(ret);
 }
 
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
@@ -8136,7 +8115,7 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 	retval = dl_task_check_affinity(p, new_mask);
 	if (retval)
 		goto out_free_new_mask;
-again:
+
 	retval = __set_cpus_allowed_ptr(p, ctx);
 	if (retval)
 		goto out_free_new_mask;
@@ -8148,7 +8127,24 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 		 * Just reset the cpumask to the cpuset's cpus_allowed.
 		 */
 		cpumask_copy(new_mask, cpus_allowed);
-		goto again;
+
+		/*
+		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
+		 * will restore the previous user_cpus_ptr value.
+		 *
+		 * In the unlikely event a previous user_cpus_ptr exists,
+		 * we need to further restrict the mask to what is allowed
+		 * by that old user_cpus_ptr.
+		 */
+		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
+			bool empty = !cpumask_and(new_mask, new_mask,
+						  ctx->user_mask);
+
+			if (WARN_ON_ONCE(empty))
+				cpumask_copy(new_mask, cpus_allowed);
+		}
+		__set_cpus_allowed_ptr(p, ctx);
+		retval = -EINVAL;
 	}
 
 out_free_new_mask:
@@ -8160,9 +8156,8 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 
 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 {
-	struct affinity_context ac = {
-		.new_mask = in_mask,
-	};
+	struct affinity_context ac;
+	struct cpumask *user_mask;
 	struct task_struct *p;
 	int retval;
 
@@ -8197,7 +8192,21 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	if (retval)
 		goto out_put_task;
 
+	user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
+	if (!user_mask) {
+		retval = -ENOMEM;
+		goto out_put_task;
+	}
+	cpumask_copy(user_mask, in_mask);
+	ac = (struct affinity_context){
+		.new_mask = in_mask,
+		.user_mask = user_mask,
+		.flags = SCA_USER,
+	};
+
 	retval = __sched_setaffinity(p, &ac);
+	kfree(ac.user_mask);
+
 out_put_task:
 	put_task_struct(p);
 	return retval;

kernel/sched/sched.h

Lines changed: 8 additions & 0 deletions
@@ -1878,6 +1878,13 @@ static inline void dirty_sched_domain_sysctl(int cpu)
 #endif
 
 extern int sched_update_scaling(void);
+
+static inline const struct cpumask *task_user_cpus(struct task_struct *p)
+{
+	if (!p->user_cpus_ptr)
+		return cpu_possible_mask; /* &init_task.cpus_mask */
+	return p->user_cpus_ptr;
+}
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
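Because task_user_cpus() falls back to cpu_possible_mask, kernel callers do not need to NULL-check p->user_cpus_ptr before masking against it. A minimal sketch of the call pattern (hypothetical function, kernel context assumed; the actual users in this commit are restrict_cpus_allowed_ptr() and relax_compatible_cpus_allowed_ptr() in the core.c hunks above):

/*
 * Hypothetical caller, not part of this commit: intersect the user-requested
 * mask (or the full possible mask when none was requested) with a
 * restricting subset, failing if no CPU would be left to run on.
 */
static int example_restrict_mask(struct task_struct *p,
				 struct cpumask *new_mask,
				 const struct cpumask *subset_mask)
{
	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask))
		return -EINVAL;
	return 0;
}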
@@ -2147,6 +2154,7 @@ extern const u32 sched_prio_to_wmult[40];
 
 struct affinity_context {
 	const struct cpumask *new_mask;
+	struct cpumask *user_mask;
 	unsigned int flags;
 };
 