@@ -7845,6 +7845,30 @@ void set_rq_offline(struct rq *rq)
 	}
 }
 
+static inline void sched_set_rq_online(struct rq *rq, int cpu)
+{
+	struct rq_flags rf;
+
+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_online(rq);
+	}
+	rq_unlock_irqrestore(rq, &rf);
+}
+
+static inline void sched_set_rq_offline(struct rq *rq, int cpu)
+{
+	struct rq_flags rf;
+
+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+	rq_unlock_irqrestore(rq, &rf);
+}
+
 /*
  * used to mark begin/end of suspend/resume:
  */
@@ -7914,7 +7938,6 @@ static inline void sched_smt_present_dec(int cpu)
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct rq_flags rf;
 
 	/*
 	 * Clear the balance_push callback and prepare to schedule
@@ -7943,20 +7966,14 @@ int sched_cpu_activate(unsigned int cpu)
 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
 	 *    domains.
 	 */
-	rq_lock_irqsave(rq, &rf);
-	if (rq->rd) {
-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_online(rq);
-	}
-	rq_unlock_irqrestore(rq, &rf);
+	sched_set_rq_online(rq, cpu);
 
 	return 0;
 }
 
 int sched_cpu_deactivate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct rq_flags rf;
 	int ret;
 
 	/*
@@ -7987,12 +8004,7 @@ int sched_cpu_deactivate(unsigned int cpu)
 	 */
 	synchronize_rcu();
 
-	rq_lock_irqsave(rq, &rf);
-	if (rq->rd) {
-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_offline(rq);
-	}
-	rq_unlock_irqrestore(rq, &rf);
+	sched_set_rq_offline(rq, cpu);
 
 	/*
 	 * When going down, decrement the number of cores with SMT present.
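Net effect of the diff: the lock/check/set sequence that appeared verbatim in both sched_cpu_activate() and sched_cpu_deactivate() now lives in a single pair of helpers, so each call site shrinks to one line. As a rough, self-contained sketch of the same deduplication pattern outside the kernel (a pthread mutex stands in for rq_lock_irqsave()/rq_unlock_irqrestore(), and all names here are illustrative, not the kernel's):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct rq: a per-CPU object whose online state
 * may only be flipped while its lock is held. */
struct toy_rq {
	pthread_mutex_t lock;
	bool has_domain;	/* stand-in for rq->rd being non-NULL */
	bool online;
};

/* The previously duplicated critical section, hoisted into one helper:
 * take the lock, check the precondition, flip the state, unlock. */
static void toy_set_rq_state(struct toy_rq *rq, bool online)
{
	pthread_mutex_lock(&rq->lock);
	if (rq->has_domain)
		rq->online = online;
	pthread_mutex_unlock(&rq->lock);
}

/* Both call sites collapse to a single call each, mirroring
 * sched_set_rq_online()/sched_set_rq_offline() above. */
static void toy_cpu_activate(struct toy_rq *rq)   { toy_set_rq_state(rq, true); }
static void toy_cpu_deactivate(struct toy_rq *rq) { toy_set_rq_state(rq, false); }

int main(void)
{
	struct toy_rq rq = { PTHREAD_MUTEX_INITIALIZER, true, false };

	toy_cpu_activate(&rq);
	printf("online after activate: %d\n", rq.online);
	toy_cpu_deactivate(&rq);
	printf("online after deactivate: %d\n", rq.online);
	return 0;
}

The kernel version's BUG_ON() precondition is approximated here by the has_domain test; the point is only that once the locking boilerplate is hoisted into a helper, the activate and deactivate paths cannot drift apart again.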