@@ -5640,43 +5640,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
-/*
- * Implement a for_each_cpu() variant that starts the scan at a given cpu
- * (@start), and wraps around.
- *
- * This is used to scan for idle CPUs; such that not all CPUs looking for an
- * idle CPU find the same CPU. The down-side is that tasks tend to cycle
- * through the LLC domain.
- *
- * Especially tbench is found sensitive to this.
- */
-
-static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
-{
-	int next;
-
-again:
-	next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n + 1);
-
-	if (*wrapped) {
-		if (next >= start)
-			return nr_cpumask_bits;
-	} else {
-		if (next >= nr_cpumask_bits) {
-			*wrapped = 1;
-			n = -1;
-			goto again;
-		}
-	}
-
-	return next;
-}
-
-#define for_each_cpu_wrap(cpu, mask, start, wrap)				\
-	for ((wrap) = 0, (cpu) = (start)-1;					\
-		(cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),	\
-		(cpu) < nr_cpumask_bits; )
-
 #ifdef CONFIG_SCHED_SMT
 
 static inline void set_idle_cores(int cpu, int val)
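
The library replacement is not shown in these hunks. Judging from the removed helper and the three-argument call sites below, the consolidated version presumably hides the wrap state inside the iterator instead of threading it through a caller-supplied variable. A minimal sketch of that shape (kernel-style C; the exact names, signature, and placement are assumptions, not quoted from the patch):

/* Sketch: carry the wrap flag by value so callers need no 'wrap' temporary. */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	/* Already wrapped around: stop once the scan reaches @start again. */
	if (wrap && n < start && next >= start)
		return nr_cpumask_bits;

	/* Ran off the top of the mask: wrap and rescan from the bottom. */
	if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}

#define for_each_cpu_wrap(cpu, mask, start)					\
	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
	     (cpu) < nr_cpumask_bits;						\
	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))

Passing the flag by value is what lets the macro drop its fourth argument: the first call starts un-wrapped, and each continuation call assumes a wrap may already have happened, with the n < start test distinguishing the two phases.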
@@ -5736,7 +5699,7 @@ void __update_idle_core(struct rq *rq)
 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
-	int core, cpu, wrap;
+	int core, cpu;
 
 	if (!static_branch_likely(&sched_smt_present))
 		return -1;
@@ -5746,7 +5709,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 
 	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
 
-	for_each_cpu_wrap(core, cpus, target, wrap) {
+	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
 
 		for_each_cpu(cpu, cpu_smt_mask(core)) {
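
To make the wrapped iteration order concrete: with a hypothetical mask holding CPUs {0, 2, 5, 6} and a start of 5 (values invented for illustration, not taken from the patch), the loop visits 5, 6, 0, 2 and then terminates, so callers scanning from different targets fan out across the LLC domain rather than all converging on the lowest-numbered idle CPU:

	/* Hypothetical usage, not part of the patch. */
	for_each_cpu_wrap(cpu, cpus, 5)
		pr_info("visited CPU %d\n", cpu);	/* 5, 6, 0, 2 */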
@@ -5812,7 +5775,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	u64 avg_cost, avg_idle = this_rq()->avg_idle;
 	u64 time, cost;
 	s64 delta;
-	int cpu, wrap;
+	int cpu;
 
 	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
 	if (!this_sd)
@@ -5829,7 +5792,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 
 	time = local_clock();
 
-	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
 		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
 		if (idle_cpu(cpu))