@@ -5195,6 +5195,8 @@ int task_can_attach(struct task_struct *p,
 
 #ifdef CONFIG_SMP
 
+static bool sched_smp_initialized __read_mostly;
+
 #ifdef CONFIG_NUMA_BALANCING
 /* Migrate current task p to target_cpu */
 int migrate_task_to(struct task_struct *p, int target_cpu)
@@ -5513,25 +5515,6 @@ int sched_cpu_starting(unsigned int cpu)
 	return 0;
 }
 
-static int __init migration_init(void)
-{
-	void *cpu = (void *)(long)smp_processor_id();
-	int err;
-
-	/* Initialize migration for the boot CPU */
-	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
-	BUG_ON(err == NOTIFY_BAD);
-	migration_call(&migration_notifier, CPU_ONLINE, cpu);
-	register_cpu_notifier(&migration_notifier);
-
-	/* Register cpu active notifiers */
-	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
-	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
-
-	return 0;
-}
-early_initcall(migration_init);
-
 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -6711,6 +6694,9 @@ static int sched_domains_numa_masks_update(struct notifier_block *nfb,
 {
 	int cpu = (long)hcpu;
 
+	if (!sched_smp_initialized)
+		return NOTIFY_DONE;
+
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 		sched_domains_numa_masks_set(cpu);
@@ -7129,6 +7115,9 @@ static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 			     void *hcpu)
 {
+	if (!sched_smp_initialized)
+		return NOTIFY_DONE;
+
 	switch (action) {
 	case CPU_ONLINE_FROZEN:
 	case CPU_DOWN_FAILED_FROZEN:
@@ -7169,6 +7158,9 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 	bool overflow;
 	int cpus;
 
+	if (!sched_smp_initialized)
+		return NOTIFY_DONE;
+
 	switch (action) {
 	case CPU_DOWN_PREPARE:
 		rcu_read_lock_sched();
@@ -7216,10 +7208,6 @@ void __init sched_init_smp(void)
 	cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
 	mutex_unlock(&sched_domains_mutex);
 
-	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
-	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
-	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
-
 	init_hrtick();
 
 	/* Move init over to a non-isolated CPU */
@@ -7230,7 +7218,32 @@ void __init sched_init_smp(void)
 
 	init_sched_rt_class();
 	init_sched_dl_class();
+	sched_smp_initialized = true;
 }
+
+static int __init migration_init(void)
+{
+	void *cpu = (void *)(long)smp_processor_id();
+	int err;
+
+	/* Initialize migration for the boot CPU */
+	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+	BUG_ON(err == NOTIFY_BAD);
+	migration_call(&migration_notifier, CPU_ONLINE, cpu);
+	register_cpu_notifier(&migration_notifier);
+
+	/* Register cpu active notifiers */
+	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
+	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
+
+	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
+	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
+	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
+
+	return 0;
+}
+early_initcall(migration_init);
+
 #else
 void __init sched_init_smp(void)
 {