
Commit c743f0a

Peter Zijlstra authored and Ingo Molnar committed
sched/fair, cpumask: Export for_each_cpu_wrap()
More users for for_each_cpu_wrap() have appeared. Promote the construct
to the generic cpumask interface. The implementation is slightly
modified to reduce the number of arguments.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Lauro Ramos Venancio <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent: 8c03346

3 files changed, 53 insertions(+), 41 deletions(-)
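
The "reduce arguments" change mentioned in the commit message is the visible difference between the two macro definitions in the diffs below: the old scheduler-local variant threads caller-owned wrap state through the loop, while the generic version keeps that state inside cpumask_next_wrap(). A side-by-side sketch (the mask and start names are illustrative):

        /* Old, sched/fair.c-local form: caller declares an int for wrap state. */
        int cpu, wrap;
        for_each_cpu_wrap(cpu, mask, start, wrap) {
                /* visit cpu */
        }

        /* New, generic form: the wrap flag is a bool internal to the iteration. */
        int cpu;
        for_each_cpu_wrap(cpu, mask, start) {
                /* visit cpu */
        }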

include/linux/cpumask.h

Lines changed: 17 additions & 0 deletions
@@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
                (cpu) = cpumask_next_zero((cpu), (mask)),       \
                (cpu) < nr_cpu_ids;)
 
+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start)                                    \
+       for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);      \
+            (cpu) < nr_cpumask_bits;                                           \
+            (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
 /**
  * for_each_cpu_and - iterate over every cpu in both masks
  * @cpu: the (optionally unsigned) integer iterator

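As a minimal usage sketch (not part of this commit; cpu_online_mask and idle_cpu() are existing kernel symbols used here purely for illustration), a caller can scan for an idle CPU starting near a target so that concurrent scanners starting at different CPUs do not all converge on the same low-numbered bit:

        int cpu;

        for_each_cpu_wrap(cpu, cpu_online_mask, target) {
                if (idle_cpu(cpu))
                        return cpu;     /* first idle cpu at or after target, wrapping */
        }
        return -1;      /* loop fell through: cpu ended up >= nr_cpu_ids */
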
kernel/sched/fair.c

Lines changed: 4 additions & 41 deletions
@@ -5640,43 +5640,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
        return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
-/*
- * Implement a for_each_cpu() variant that starts the scan at a given cpu
- * (@start), and wraps around.
- *
- * This is used to scan for idle CPUs; such that not all CPUs looking for an
- * idle CPU find the same CPU. The down-side is that tasks tend to cycle
- * through the LLC domain.
- *
- * Especially tbench is found sensitive to this.
- */
-
-static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
-{
-       int next;
-
-again:
-       next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
-
-       if (*wrapped) {
-               if (next >= start)
-                       return nr_cpumask_bits;
-       } else {
-               if (next >= nr_cpumask_bits) {
-                       *wrapped = 1;
-                       n = -1;
-                       goto again;
-               }
-       }
-
-       return next;
-}
-
-#define for_each_cpu_wrap(cpu, mask, start, wrap)                              \
-       for ((wrap) = 0, (cpu) = (start)-1;                                     \
-               (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),     \
-               (cpu) < nr_cpumask_bits; )
-
 #ifdef CONFIG_SCHED_SMT
 
 static inline void set_idle_cores(int cpu, int val)
@@ -5736,7 +5699,7 @@ void __update_idle_core(struct rq *rq)
 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 {
        struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
-       int core, cpu, wrap;
+       int core, cpu;
 
        if (!static_branch_likely(&sched_smt_present))
                return -1;
@@ -5746,7 +5709,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 
        cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
 
-       for_each_cpu_wrap(core, cpus, target, wrap) {
+       for_each_cpu_wrap(core, cpus, target) {
                bool idle = true;
 
                for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -5812,7 +5775,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        u64 avg_cost, avg_idle = this_rq()->avg_idle;
        u64 time, cost;
        s64 delta;
-       int cpu, wrap;
+       int cpu;
 
        this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
        if (!this_sd)
@@ -5829,7 +5792,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 
        time = local_clock();
 
-       for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+       for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
                if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
                        continue;
                if (idle_cpu(cpu))
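
For reference, the updated loop in select_idle_cpu() expands (by hand, from the macro added in include/linux/cpumask.h above) to roughly:

        for (cpu = cpumask_next_wrap(target - 1, sched_domain_span(sd), target, false);
             cpu < nr_cpumask_bits;
             cpu = cpumask_next_wrap(cpu, sched_domain_span(sd), target, true)) {
                ...
        }

The first call passes wrap=false because @target itself may not be set in the mask; subsequent calls pass wrap=true so the iteration terminates once it crosses @start again.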

lib/cpumask.c

Lines changed: 32 additions & 0 deletions
@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 }
 EXPORT_SYMBOL(cpumask_any_but);
 
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+       int next;
+
+again:
+       next = cpumask_next(n, mask);
+
+       if (wrap && n < start && next >= start) {
+               return nr_cpumask_bits;
+
+       } else if (next >= nr_cpumask_bits) {
+               wrap = true;
+               n = -1;
+               goto again;
+       }
+
+       return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**
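
The visiting order the helper produces can be modelled outside the kernel. The following standalone C program (illustrative only, not kernel code) walks a 16-bit mask starting at bit 10 and wrapping, mirroring what cpumask_next_wrap() does across its two passes:

        #include <stdio.h>

        int main(void)
        {
                unsigned int mask = (1u << 2) | (1u << 7) | (1u << 12);
                int start = 10, bits = 16;

                /* One sweep over all bit positions, beginning at start and
                 * wrapping modulo the mask width: first the bits >= start,
                 * then the bits below it. */
                for (int n = start; n < start + bits; n++) {
                        int bit = n % bits;
                        if (mask & (1u << bit))
                                printf("%d\n", bit);    /* prints 12, then 2, then 7 */
                }
                return 0;
        }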
