@@ -116,85 +116,6 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu)
116116 return cpu ;
117117}
118118
119- #if NR_CPUS == 1
120- /* Uniprocessor. Assume all masks are "1". */
121- static inline unsigned int cpumask_first (const struct cpumask * srcp )
122- {
123- return 0 ;
124- }
125-
126- static inline unsigned int cpumask_first_zero (const struct cpumask * srcp )
127- {
128- return 0 ;
129- }
130-
131- static inline unsigned int cpumask_first_and (const struct cpumask * srcp1 ,
132- const struct cpumask * srcp2 )
133- {
134- return 0 ;
135- }
136-
137- static inline unsigned int cpumask_last (const struct cpumask * srcp )
138- {
139- return 0 ;
140- }
141-
142- /* Valid inputs for n are -1 and 0. */
143- static inline unsigned int cpumask_next (int n , const struct cpumask * srcp )
144- {
145- return n + 1 ;
146- }
147-
148- static inline unsigned int cpumask_next_zero (int n , const struct cpumask * srcp )
149- {
150- return n + 1 ;
151- }
152-
153- static inline unsigned int cpumask_next_and (int n ,
154- const struct cpumask * srcp ,
155- const struct cpumask * andp )
156- {
157- return n + 1 ;
158- }
159-
160- static inline unsigned int cpumask_next_wrap (int n , const struct cpumask * mask ,
161- int start , bool wrap )
162- {
163- /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
164- return (wrap && n == 0 );
165- }
166-
167- /* cpu must be a valid cpu, ie 0, so there's no other choice. */
168- static inline unsigned int cpumask_any_but (const struct cpumask * mask ,
169- unsigned int cpu )
170- {
171- return 1 ;
172- }
173-
174- static inline unsigned int cpumask_local_spread (unsigned int i , int node )
175- {
176- return 0 ;
177- }
178-
179- static inline int cpumask_any_and_distribute (const struct cpumask * src1p ,
180- const struct cpumask * src2p ) {
181- return cpumask_first_and (src1p , src2p );
182- }
183-
184- static inline int cpumask_any_distribute (const struct cpumask * srcp )
185- {
186- return cpumask_first (srcp );
187- }
188-
189- #define for_each_cpu (cpu , mask ) \
190- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
191- #define for_each_cpu_not (cpu , mask ) \
192- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
193- #define for_each_cpu_wrap (cpu , mask , start ) \
194- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
195- #define for_each_cpu_and (cpu , mask1 , mask2 ) \
196- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
197- #else
198119/**
199120 * cpumask_first - get the first cpu in a cpumask
200121 * @srcp: the cpumask pointer
@@ -260,10 +181,29 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
260181
261182int __pure cpumask_next_and (int n , const struct cpumask * , const struct cpumask * );
262183int __pure cpumask_any_but (const struct cpumask * mask , unsigned int cpu );
184+
185+ #if NR_CPUS == 1
186+ /* Uniprocessor: there is only one valid CPU */
187+ static inline unsigned int cpumask_local_spread (unsigned int i , int node )
188+ {
189+ return 0 ;
190+ }
191+
192+ static inline int cpumask_any_and_distribute (const struct cpumask * src1p ,
193+ const struct cpumask * src2p ) {
194+ return cpumask_first_and (src1p , src2p );
195+ }
196+
197+ static inline int cpumask_any_distribute (const struct cpumask * srcp )
198+ {
199+ return cpumask_first (srcp );
200+ }
201+ #else
263202unsigned int cpumask_local_spread (unsigned int i , int node );
264203int cpumask_any_and_distribute (const struct cpumask * src1p ,
265204 const struct cpumask * src2p );
266205int cpumask_any_distribute (const struct cpumask * srcp );
206+ #endif /* NR_CPUS */
267207
268208/**
269209 * for_each_cpu - iterate over every cpu in a mask
@@ -324,7 +264,6 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
324264 for ((cpu) = -1; \
325265 (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
326266 (cpu) < nr_cpu_ids;)
327- #endif /* SMP */
328267
329268#define CPU_BITS_NONE \
330269{ \