
Commit 40190a7

sched/hotplug: Convert cpu_[in]active notifiers to state machine
Now that we have reduced everything to single notifiers, it is simple to move them into the hotplug state machine space.

Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
1 parent c6d2c74 commit 40190a7
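
For orientation, the shape of the conversion: the old hotplug notifier demultiplexed several CPU_* events through a single callback, whereas the state machine gives the scheduler a dedicated startup/teardown pair. A minimal before/after sketch, abbreviated from the diff below:

/* Before: one notifier, dispatching on 'action'. */
static int sched_cpu_active(struct notifier_block *nfb,
			    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		set_cpu_active(cpu, true);
		/* ... update NUMA masks and cpusets ... */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

/* After: dedicated callbacks wired to the new CPUHP_AP_ACTIVE state. */
int sched_cpu_activate(unsigned int cpu);
int sched_cpu_deactivate(unsigned int cpu);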

5 files changed, 30 insertions(+), 60 deletions(-)


include/linux/cpu.h

Lines changed: 0 additions & 12 deletions
@@ -59,18 +59,6 @@ struct notifier_block;
  * CPU notifier priorities.
  */
 enum {
-	/*
-	 * SCHED_ACTIVE marks a cpu which is coming up active during
-	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first notifier. Is
-	 * also cpuset according to cpu_active mask right after activating the
-	 * cpu. During CPU_DOWN_PREPARE, SCHED_INACTIVE reversed the operation.
-	 *
-	 * This ordering guarantees consistent cpu_active mask and
-	 * migration behavior to all cpu notifiers.
-	 */
-	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
-	CPU_PRI_SCHED_INACTIVE	= INT_MIN,
-
 	/* migration should happen before other stuff but after perf */
 	CPU_PRI_PERF		= 20,
 	CPU_PRI_MIGRATION	= 10,
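
The two removed priorities existed only to pin the scheduler callbacks to the absolute ends of the notifier chain. As a reminder of the mechanism being retired, a hypothetical priority-ordered registration might have looked like this (illustrative only, not part of this commit; the foo_* names are made up):

/* Hypothetical example: with the old notifier interface, ordering was
 * expressed through the .priority field; a higher priority ran earlier
 * on bringup, so INT_MAX/INT_MIN bracketed every other CPU notifier. */
static int foo_cpu_callback(struct notifier_block *nfb,
			    unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_nb = {
	.notifier_call	= foo_cpu_callback,
	.priority	= CPU_PRI_PERF,		/* one of the remaining constants */
};

/* registered e.g. via register_cpu_notifier(&foo_cpu_nb) */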

include/linux/cpuhotplug.h

Lines changed: 1 addition & 0 deletions
@@ -13,6 +13,7 @@ enum cpuhp_state {
 	CPUHP_AP_ONLINE,
 	CPUHP_TEARDOWN_CPU,
 	CPUHP_AP_ONLINE_IDLE,
+	CPUHP_AP_ACTIVE,
 	CPUHP_AP_SMPBOOT_THREADS,
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,

include/linux/sched.h

Lines changed: 2 additions & 0 deletions
@@ -373,6 +373,8 @@ extern void trap_init(void);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
 extern int sched_cpu_starting(unsigned int cpu);
+extern int sched_cpu_activate(unsigned int cpu);
+extern int sched_cpu_deactivate(unsigned int cpu);
 
 extern void sched_show_task(struct task_struct *p);

kernel/cpu.c

Lines changed: 6 additions & 2 deletions
@@ -923,8 +923,6 @@ void cpuhp_online_idle(enum cpuhp_state state)
 
 	st->state = CPUHP_AP_ONLINE_IDLE;
 
-	/* The cpu is marked online, set it active now */
-	set_cpu_active(cpu, true);
 	/* Unpark the stopper thread and the hotplug thread of this cpu */
 	stop_machine_unpark(cpu);
 	kthread_unpark(st->thread);
@@ -1259,6 +1257,12 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 	[CPUHP_AP_ONLINE] = {
 		.name			= "ap:online",
 	},
+	/* First state is scheduler control. Interrupts are enabled */
+	[CPUHP_AP_ACTIVE] = {
+		.name			= "sched:active",
+		.startup		= sched_cpu_activate,
+		.teardown		= sched_cpu_deactivate,
+	},
 	/* Handle smpboot threads park/unpark */
 	[CPUHP_AP_SMPBOOT_THREADS] = {
 		.name			= "smpboot:threads",
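
The scheduler gets a fixed slot in the static AP state table above, so ordering now follows from the enum position rather than a priority value. For comparison, a subsystem without a reserved slot would typically register dynamically; a rough sketch assuming the cpuhp_setup_state() helper from the same hotplug rework (the foo_* names are illustrative):

/* Hypothetical dynamic registration: the core picks a slot in the
 * CPUHP_AP_ONLINE_DYN range and runs the startup callback on all
 * CPUs that are already online. */
static int foo_online(unsigned int cpu)
{
	/* bring per-CPU state up */
	return 0;
}

static int foo_offline(unsigned int cpu)
{
	/* tear per-CPU state down */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_online, foo_offline);
	return ret < 0 ? ret : 0;
}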

kernel/sched/core.c

Lines changed: 21 additions & 46 deletions
@@ -6634,9 +6634,6 @@ static void sched_domains_numa_masks_set(unsigned int cpu)
 	int node = cpu_to_node(cpu);
 	int i, j;
 
-	if (!sched_smp_initialized)
-		return;
-
 	for (i = 0; i < sched_domains_numa_levels; i++) {
 		for (j = 0; j < nr_node_ids; j++) {
 			if (node_distance(j, node) <= sched_domains_numa_distance[i])
@@ -6649,9 +6646,6 @@ static void sched_domains_numa_masks_clear(unsigned int cpu)
 {
 	int i, j;
 
-	if (!sched_smp_initialized)
-		return;
-
 	for (i = 0; i < sched_domains_numa_levels; i++) {
 		for (j = 0; j < nr_node_ids; j++)
 			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
@@ -7051,12 +7045,9 @@ static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */
  * If we come here as part of a suspend/resume, don't touch cpusets because we
  * want to restore it back to its original state upon resume anyway.
  */
-static void cpuset_cpu_active(bool frozen)
+static void cpuset_cpu_active(void)
 {
-	if (!sched_smp_initialized)
-		return;
-
-	if (frozen) {
+	if (cpuhp_tasks_frozen) {
 		/*
 		 * num_cpus_frozen tracks how many CPUs are involved in suspend
 		 * resume sequence. As long as this is not the last online
@@ -7077,17 +7068,14 @@ static void cpuset_cpu_active(bool frozen)
 	cpuset_update_active_cpus(true);
 }
 
-static int cpuset_cpu_inactive(unsigned int cpu, bool frozen)
+static int cpuset_cpu_inactive(unsigned int cpu)
 {
 	unsigned long flags;
 	struct dl_bw *dl_b;
 	bool overflow;
 	int cpus;
 
-	if (!sched_smp_initialized)
-		return 0;
-
-	if (!frozen) {
+	if (!cpuhp_tasks_frozen) {
 		rcu_read_lock_sched();
 		dl_b = dl_bw_of(cpu);
 
@@ -7108,42 +7096,33 @@ static int cpuset_cpu_inactive(unsigned int cpu, bool frozen)
 	return 0;
 }
 
-static int sched_cpu_active(struct notifier_block *nfb, unsigned long action,
-			    void *hcpu)
+int sched_cpu_activate(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
+	set_cpu_active(cpu, true);
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		set_cpu_active(cpu, true);
+	if (sched_smp_initialized) {
 		sched_domains_numa_masks_set(cpu);
-		cpuset_cpu_active(action & CPU_TASKS_FROZEN);
-		return NOTIFY_OK;
-	default:
-		return NOTIFY_DONE;
+		cpuset_cpu_active();
 	}
+	return 0;
 }
 
-static int sched_cpu_inactive(struct notifier_block *nfb,
-			      unsigned long action, void *hcpu)
+int sched_cpu_deactivate(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
 	int ret;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		set_cpu_active(cpu, false);
-		ret = cpuset_cpu_inactive(cpu, action & CPU_TASKS_FROZEN);
-		if (ret) {
-			set_cpu_active(cpu, true);
-			return notifier_from_errno(ret);
-		}
-		sched_domains_numa_masks_clear(cpu);
-		return NOTIFY_OK;
-	default:
-		return NOTIFY_DONE;
+	set_cpu_active(cpu, false);
+
+	if (!sched_smp_initialized)
+		return 0;
+
+	ret = cpuset_cpu_inactive(cpu);
+	if (ret) {
+		set_cpu_active(cpu, true);
+		return ret;
 	}
+	sched_domains_numa_masks_clear(cpu);
+	return 0;
 }
 
 int sched_cpu_starting(unsigned int cpu)
@@ -7197,10 +7176,6 @@ static int __init migration_init(void)
 	migration_call(&migration_notifier, CPU_ONLINE, cpu);
 	register_cpu_notifier(&migration_notifier);
 
-	/* Register cpu active notifiers */
-	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
-	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
-
 	return 0;
 }
 early_initcall(migration_init);
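
Putting the pieces together, the resulting hotplug flow around the new state looks roughly like this (a sketch derived from the state table and callbacks above, not an exhaustive list of states):

/*
 * Bringup (AP states run in enum order):
 *   CPUHP_AP_ONLINE_IDLE      idle thread reaches cpuhp_online_idle()
 *   CPUHP_AP_ACTIVE           sched_cpu_activate():
 *                               set_cpu_active(cpu, true)
 *                               sched_domains_numa_masks_set(cpu)  \ only once
 *                               cpuset_cpu_active()                / sched_smp_initialized
 *   CPUHP_AP_SMPBOOT_THREADS  smpboot threads are unparked
 *
 * Teardown runs the states in reverse; sched_cpu_deactivate() may fail
 * (deadline bandwidth overflow in cpuset_cpu_inactive()) and then rolls
 * back by re-marking the CPU active, aborting the offline.
 */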
