Commit 641faf1

Merge branches 'bitmaprange.2021.05.10c', 'doc.2021.05.10c', 'fixes.2021.05.13a', 'kvfree_rcu.2021.05.10c', 'mmdumpobj.2021.05.10c', 'nocb.2021.05.12a', 'srcu.2021.05.12a', 'tasks.2021.05.18a' and 'torture.2021.05.10c' into HEAD
bitmaprange.2021.05.10c: Allow "all" for bitmap ranges.
doc.2021.05.10c: Documentation updates.
fixes.2021.05.13a: Miscellaneous fixes.
kvfree_rcu.2021.05.10c: kvfree_rcu() updates.
mmdumpobj.2021.05.10c: mem_dump_obj() updates.
nocb.2021.05.12a: RCU NOCB CPU updates, including limited deoffloading.
srcu.2021.05.12a: SRCU updates.
tasks.2021.05.18a: Tasks-RCU updates.
torture.2021.05.10c: Torture-test updates.
9 parents: a6814a7 + e5bd61e + c70360c + a78d4a2 + e548eaa + a616aec + 0a580fa + 474d099 + 5390473

46 files changed: +1241, -570 lines

Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst

Lines changed: 3 additions & 3 deletions
@@ -21,7 +21,7 @@ Any code that happens after the end of a given RCU grace period is guaranteed
 to see the effects of all accesses prior to the beginning of that grace
 period that are within RCU read-side critical sections.
 Similarly, any code that happens before the beginning of a given RCU grace
-period is guaranteed to see the effects of all accesses following the end
+period is guaranteed to not see the effects of all accesses following the end
 of that grace period that are within RCU read-side critical sections.
 
 Note well that RCU-sched read-side critical sections include any region
@@ -339,14 +339,14 @@ The diagram below shows the path of ordering if the leftmost
 leftmost ``rcu_node`` structure offlines its last CPU and if the next
 ``rcu_node`` structure has no online CPUs).
 
-.. kernel-figure:: TreeRCU-gp-init-1.svg
+.. kernel-figure:: TreeRCU-gp-init-2.svg
 
 The final ``rcu_gp_init()`` pass through the ``rcu_node`` tree traverses
 breadth-first, setting each ``rcu_node`` structure's ``->gp_seq`` field
 to the newly advanced value from the ``rcu_state`` structure, as shown
 in the following diagram.
 
-.. kernel-figure:: TreeRCU-gp-init-1.svg
+.. kernel-figure:: TreeRCU-gp-init-3.svg
 
 This change will also cause each CPU's next call to
 ``__note_gp_changes()`` to notice that a new grace period has started,
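
The corrected sentence and its counterpart describe the two directions of the grace-period ordering guarantee. A minimal sketch of the pattern that relies on it follows (struct foo, global_foo, and foo_mutex are illustrative names, not part of this commit): any reader that can still see the old structure must have started before the grace period, so it is guaranteed to have finished by the time kfree() runs.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int a;
};

static struct foo __rcu *global_foo;
static DEFINE_MUTEX(foo_mutex);

/* Updater: unpublish the structure, wait out a grace period, then free it. */
static void retract_foo(void)
{
        struct foo *old;

        mutex_lock(&foo_mutex);
        old = rcu_dereference_protected(global_foo, lockdep_is_held(&foo_mutex));
        rcu_assign_pointer(global_foo, NULL);
        mutex_unlock(&foo_mutex);
        synchronize_rcu();      /* all readers that might see 'old' have finished */
        kfree(old);
}

/* Reader: either sees the structure before the retraction or not at all,
 * and never outlives the grace period while holding a reference. */
static int read_foo(void)
{
        struct foo *p;
        int ret = -1;

        rcu_read_lock();
        p = rcu_dereference(global_foo);
        if (p)
                ret = p->a;
        rcu_read_unlock();
        return ret;
}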

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 5 additions & 0 deletions
@@ -4290,6 +4290,11 @@
 			whole algorithm to behave better in low memory
 			condition.
 
+	rcutree.rcu_delay_page_cache_fill_msec= [KNL]
+			Set the page-cache refill delay (in milliseconds)
+			in response to low-memory conditions.  The range
+			of permitted values is in the range 0:100000.
+
 	rcutree.jiffies_till_first_fqs= [KNL]
 			Set delay from grace-period initialization to
 			first attempt to force quiescent states.
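
As a usage sketch only (the value is arbitrary), booting with rcutree.rcu_delay_page_cache_fill_msec=5000 would set that refill delay to five seconds, subject to the documented 0:100000 clamp.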

include/linux/rcupdate.h

Lines changed: 49 additions & 35 deletions
@@ -315,7 +315,7 @@ static inline int rcu_read_lock_any_held(void)
 #define RCU_LOCKDEP_WARN(c, s) \
 	do { \
 		static bool __section(".data.unlikely") __warned; \
-		if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+		if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \
 			__warned = true; \
 			lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
 		} \
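
For context, RCU_LOCKDEP_WARN() is normally used to assert that a helper is invoked with adequate protection; with the reordered test, the caller-supplied condition (c) is now evaluated before the lockdep state is consulted. A hypothetical caller, reusing the illustrative global_foo and foo_mutex from the sketch above:

/* Hypothetical helper, not from this commit: complain (once) if called
 * without holding foo_mutex and outside any RCU read-side critical section. */
static struct foo *get_foo(void)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() && !lockdep_is_held(&foo_mutex),
                         "get_foo() needs rcu_read_lock() or foo_mutex");
        return rcu_dereference_check(global_foo, lockdep_is_held(&foo_mutex));
}
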
@@ -363,6 +363,20 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define rcu_check_sparse(p, space)
 #endif /* #else #ifdef __CHECKER__ */
 
+/**
+ * unrcu_pointer - mark a pointer as not being RCU protected
+ * @p: pointer needing to lose its __rcu property
+ *
+ * Converts @p from an __rcu pointer to a __kernel pointer.
+ * This allows an __rcu pointer to be used with xchg() and friends.
+ */
+#define unrcu_pointer(p) \
+({ \
+	typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \
+	rcu_check_sparse(p, __rcu); \
+	((typeof(*p) __force __kernel *)(_________p1)); \
+})
+
 #define __rcu_access_pointer(p, space) \
 ({ \
 	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
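
A hedged sketch of how the new unrcu_pointer() macro might be used (struct item and item_head are invented names): xchg() privatizes an RCU-protected pointer, and unrcu_pointer() strips the __rcu address-space annotation so the result can be handled as an ordinary pointer without sparse warnings.

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
        struct rcu_head rh;
        int val;
};

static struct item __rcu *item_head;

/* Atomically take ownership of whatever item_head points to and queue it
 * for freeing after a grace period. */
static void retire_item(void)
{
        struct item *p;

        p = unrcu_pointer(xchg(&item_head, NULL));
        if (p)
                kfree_rcu(p, rh);
}
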
@@ -518,7 +532,12 @@ do { \
  * @p: The pointer to read, prior to dereferencing
  * @c: The conditions under which the dereference will take place
  *
- * This is the RCU-bh counterpart to rcu_dereference_check().
+ * This is the RCU-bh counterpart to rcu_dereference_check(). However,
+ * please note that starting in v5.0 kernels, vanilla RCU grace periods
+ * wait for local_bh_disable() regions of code in addition to regions of
+ * code demarked by rcu_read_lock() and rcu_read_unlock(). This means
+ * that synchronize_rcu(), call_rcu, and friends all take not only
+ * rcu_read_lock() but also rcu_read_lock_bh() into account.
  */
 #define rcu_dereference_bh_check(p, c) \
 	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
@@ -529,6 +548,11 @@ do { \
  * @c: The conditions under which the dereference will take place
  *
  * This is the RCU-sched counterpart to rcu_dereference_check().
+ * However, please note that starting in v5.0 kernels, vanilla RCU grace
+ * periods wait for preempt_disable() regions of code in addition to
+ * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
+ * This means that synchronize_rcu(), call_rcu, and friends all take not
+ * only rcu_read_lock() but also rcu_read_lock_sched() into account.
  */
 #define rcu_dereference_sched_check(p, c) \
 	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
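
As an illustrative (not authoritative) use of the macros documented above, the condition argument names the alternative protection under which the access is also legal, typically an update-side lock; this reuses the global_foo and foo_mutex names from the earlier sketch.

/* Hypothetical accessor: legal inside rcu_read_lock_bh() (or, in v5.0 and
 * later, any softirq-disabled region) or while holding foo_mutex. */
static struct foo *peek_foo_bh(void)
{
        return rcu_dereference_bh_check(global_foo,
                                        lockdep_is_held(&foo_mutex));
}
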
@@ -620,6 +644,12 @@ do { \
  * sections, invocation of the corresponding RCU callback is deferred
  * until after the all the other CPUs exit their critical sections.
  *
+ * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
+ * wait for regions of code with preemption disabled, including regions of
+ * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which
+ * define synchronize_sched(), only code enclosed within rcu_read_lock()
+ * and rcu_read_unlock() are guaranteed to be waited for.
+ *
  * Note, however, that RCU callbacks are permitted to run concurrently
  * with new RCU read-side critical sections. One way that this can happen
  * is via the following sequence of events: (1) CPU 0 enters an RCU
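
For reference, the call_rcu() pattern that this comment block documents looks roughly as follows; the struct bar names are invented for illustration, and on v5.0 and later the callback is additionally deferred past any concurrently running preempt-, irq-, or softirq-disabled region.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar {
        struct rcu_head rh;
        int val;
};

static struct bar __rcu *global_bar;
static DEFINE_MUTEX(bar_mutex);

static void free_bar_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct bar, rh));
}

/* Unpublish the current structure and schedule deferred reclamation
 * without blocking the caller. */
static void retract_bar(void)
{
        struct bar *old;

        mutex_lock(&bar_mutex);
        old = rcu_dereference_protected(global_bar, lockdep_is_held(&bar_mutex));
        rcu_assign_pointer(global_bar, NULL);
        mutex_unlock(&bar_mutex);
        if (old)
                call_rcu(&old->rh, free_bar_cb);
}
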
@@ -672,33 +702,12 @@ static __always_inline void rcu_read_lock(void)
 /**
  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
  *
- * In most situations, rcu_read_unlock() is immune from deadlock.
- * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
- * is responsible for deboosting, which it does via rt_mutex_unlock().
- * Unfortunately, this function acquires the scheduler's runqueue and
- * priority-inheritance spinlocks. This means that deadlock could result
- * if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them.
- *
- * That said, RCU readers are never priority boosted unless they were
- * preempted. Therefore, one way to avoid deadlock is to make sure
- * that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with one of
- * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
- * a number of ways, for example, by invoking preempt_disable() before
- * critical section's outermost rcu_read_lock().
- *
- * Given that the set of locks acquired by rt_mutex_unlock() might change
- * at any time, a somewhat more future-proofed approach is to make sure
- * that that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with irqs disabled.
- * This approach relies on the fact that rt_mutex_unlock() currently only
- * acquires irq-disabled locks.
- *
- * The second of these two approaches is best in most situations,
- * however, the first approach can also be useful, at least to those
- * developers willing to keep abreast of the set of locks acquired by
- * rt_mutex_unlock().
+ * In almost all situations, rcu_read_unlock() is immune from deadlock.
+ * In recent kernels that have consolidated synchronize_sched() and
+ * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
+ * also extends to the scheduler's runqueue and priority-inheritance
+ * spinlocks, courtesy of the quiescent-state deferral that is carried
+ * out when rcu_read_unlock() is invoked with interrupts disabled.
  *
  * See rcu_read_lock() for more information.
  */
@@ -714,9 +723,11 @@ static inline void rcu_read_unlock(void)
 /**
  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  *
- * This is equivalent of rcu_read_lock(), but also disables softirqs.
- * Note that anything else that disables softirqs can also serve as
- * an RCU read-side critical section.
+ * This is equivalent to rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as an RCU
+ * read-side critical section. However, please note that this equivalence
+ * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
+ * rcu_read_lock_bh() were unrelated.
  *
  * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
  * must occur in the same context, for example, it is illegal to invoke
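
An illustrative RCU-bh reader matching the updated comment, again using the struct foo and global_foo names from the first sketch; on v5.0 and later the same region also counts as a vanilla RCU read-side critical section.

static int read_foo_bh(void)
{
        struct foo *p;
        int ret = -1;

        rcu_read_lock_bh();             /* also disables softirqs */
        p = rcu_dereference_bh(global_foo);
        if (p)
                ret = p->a;
        rcu_read_unlock_bh();
        return ret;
}
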
@@ -749,9 +760,12 @@ static inline void rcu_read_unlock_bh(void)
 /**
  * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
  *
- * This is equivalent of rcu_read_lock(), but disables preemption.
- * Read-side critical sections can also be introduced by anything else
- * that disables preemption, including local_irq_disable() and friends.
+ * This is equivalent to rcu_read_lock(), but also disables preemption.
+ * Read-side critical sections can also be introduced by anything else that
+ * disables preemption, including local_irq_disable() and friends. However,
+ * please note that the equivalence to rcu_read_lock() applies only to
+ * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
+ * were unrelated.
  *
  * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
  * must occur in the same context, for example, it is illegal to invoke
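
And the RCU-sched flavor with the same illustrative names; as the comment notes, on v5.0 and later any other preemption-disabled region, such as one bracketed by local_irq_disable(), would be treated the same way.

static int read_foo_sched(void)
{
        struct foo *p;
        int ret = -1;

        rcu_read_lock_sched();          /* also disables preemption */
        p = rcu_dereference_sched(global_foo);
        if (p)
                ret = p->a;
        rcu_read_unlock_sched();
        return ret;
}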

include/linux/rcutiny.h

Lines changed: 0 additions & 1 deletion
@@ -86,7 +86,6 @@ static inline void rcu_irq_enter(void) { }
 static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
-static inline void rcu_irq_exit_preempt(void) { }
 static inline void rcu_irq_exit_check_preempt(void) { }
 #define rcu_is_idle_cpu(cpu) \
 	(is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq())

include/linux/rcutree.h

Lines changed: 0 additions & 1 deletion
@@ -49,7 +49,6 @@ void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
-void rcu_irq_exit_preempt(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
 bool rcu_is_idle_cpu(int cpu);

include/linux/srcu.h

Lines changed: 6 additions & 0 deletions
@@ -64,6 +64,12 @@ unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
 
+#ifdef CONFIG_SRCU
+void srcu_init(void);
+#else /* #ifdef CONFIG_SRCU */
+static inline void srcu_init(void) { }
+#endif /* #else #ifdef CONFIG_SRCU */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 /**

include/linux/srcutree.h

Lines changed: 0 additions & 2 deletions
@@ -82,9 +82,7 @@ struct srcu_struct {
 						/* callback for the barrier */
 						/*  operation. */
 	struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 };
 
 /* Values for state variable (bottom bits of ->srcu_gp_seq). */

include/linux/timer.h

Lines changed: 0 additions & 2 deletions
@@ -192,8 +192,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer);
 
 #define del_singleshot_timer_sync(t) del_timer_sync(t)
 
-extern bool timer_curr_running(struct timer_list *timer);
-
 extern void init_timers(void);
 struct hrtimer;
 extern enum hrtimer_restart it_real_fn(struct hrtimer *);

include/trace/events/rcu.h

Lines changed: 1 addition & 0 deletions
@@ -278,6 +278,7 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock,
  *		"WakeNot": Don't wake rcuo kthread.
  *		"WakeNotPoll": Don't wake rcuo kthread because it is polling.
  *		"WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
+ *		"WakeBypassIsDeferred": Wake rcuo kthread later, bypass list is contended.
  *		"WokeEmpty": rcuo CB kthread woke to find empty list.
  */
 TRACE_EVENT_RCU(rcu_nocb_wake,

init/main.c

Lines changed: 2 additions & 0 deletions
@@ -42,6 +42,7 @@
 #include <linux/profile.h>
 #include <linux/kfence.h>
 #include <linux/rcupdate.h>
+#include <linux/srcu.h>
 #include <linux/moduleparam.h>
 #include <linux/kallsyms.h>
 #include <linux/writeback.h>
@@ -979,6 +980,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
 	tick_init();
 	rcu_init_nohz();
 	init_timers();
+	srcu_init();
 	hrtimers_init();
 	softirq_init();
 	timekeeping_init();
