@@ -315,7 +315,7 @@ static inline int rcu_read_lock_any_held(void)
315315#define RCU_LOCKDEP_WARN(c, s) \
316316 do { \
317317 static bool __section(".data.unlikely") __warned; \
318- if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
318+ if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \
319319 __warned = true; \
320320 lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
321321 } \
@@ -363,6 +363,20 @@ static inline void rcu_preempt_sleep_check(void) { }
363363#define rcu_check_sparse(p, space)
364364#endif /* #else #ifdef __CHECKER__ */
365365
366+ /**
367+ * unrcu_pointer - mark a pointer as not being RCU protected
368+ * @p: pointer needing to lose its __rcu property
369+ *
370+ * Converts @p from an __rcu pointer to a __kernel pointer.
371+ * This allows an __rcu pointer to be used with xchg() and friends.
372+ */
373+ #define unrcu_pointer(p) \
374+ ({ \
375+ typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \
376+ rcu_check_sparse(p, __rcu); \
377+ ((typeof(*p) __force __kernel *)(_________p1)); \
378+ })
379+
366380#define __rcu_access_pointer(p, space) \
367381({ \
368382 typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
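As a quick usage sketch (not part of this patch; the struct, field, and function names below are hypothetical), unrcu_pointer() is what lets an __rcu-annotated pointer be fed through xchg(), which traffics in plain kernel pointers:

#include <linux/atomic.h>
#include <linux/rcupdate.h>

/* Hypothetical RCU-protected global pointer and its element type. */
struct foo {
        struct rcu_head rcu;
        int a;
};

static struct foo __rcu *global_foo;

/*
 * Atomically publish @newp and hand back the old element so the caller
 * can free it after a grace period.  RCU_INITIALIZER() adds the __rcu
 * marking on the way in; unrcu_pointer() strips it from the value that
 * xchg() returns, keeping sparse happy.
 */
static struct foo *replace_foo(struct foo *newp)
{
        return unrcu_pointer(xchg(&global_foo, RCU_INITIALIZER(newp)));
}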
@@ -518,7 +532,12 @@ do { \
518532 * @p: The pointer to read, prior to dereferencing
519533 * @c: The conditions under which the dereference will take place
520534 *
521- * This is the RCU-bh counterpart to rcu_dereference_check().
535+ * This is the RCU-bh counterpart to rcu_dereference_check(). However,
536+ * please note that starting in v5.0 kernels, vanilla RCU grace periods
537+ * wait for local_bh_disable() regions of code in addition to regions of
538+ * code demarked by rcu_read_lock() and rcu_read_unlock(). This means
539+ * that synchronize_rcu(), call_rcu(), and friends all take not only
540+ * rcu_read_lock() but also rcu_read_lock_bh() into account.
522541 */
523542#define rcu_dereference_bh_check(p, c) \
524543 __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
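A minimal reader sketch for the macro above (not from the patch; my_data, my_ptr, and my_lock are made-up names): the dereference is legitimate either inside rcu_read_lock_bh() or with the update-side lock held, and lockdep checks exactly that condition:

#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_data {
        int value;
};

static DEFINE_SPINLOCK(my_lock);                /* update-side lock */
static struct my_data __rcu *my_ptr;

static int read_value(void)
{
        struct my_data *p;
        int ret = -1;

        rcu_read_lock_bh();
        /* OK under rcu_read_lock_bh(), or if the caller holds my_lock. */
        p = rcu_dereference_bh_check(my_ptr, lockdep_is_held(&my_lock));
        if (p)
                ret = p->value;
        rcu_read_unlock_bh();
        return ret;
}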
@@ -529,6 +548,11 @@ do { \
529548 * @c: The conditions under which the dereference will take place
530549 *
531550 * This is the RCU-sched counterpart to rcu_dereference_check().
551+ * However, please note that starting in v5.0 kernels, vanilla RCU grace
552+ * periods wait for preempt_disable() regions of code in addition to
553+ * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
554+ * This means that synchronize_rcu(), call_rcu(), and friends all take not
555+ * only rcu_read_lock() but also rcu_read_lock_sched() into account.
532556 */
533557#define rcu_dereference_sched_check(p, c) \
534558 __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
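An analogous sketch for the RCU-sched flavor (again with hypothetical names): the pointer may be picked up inside rcu_read_lock_sched(), which disables preemption, or with the update-side mutex held:

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct my_cfg {
        int threshold;
};

static DEFINE_MUTEX(cfg_mutex);                 /* update-side mutex */
static struct my_cfg __rcu *cur_cfg;

static int read_threshold(void)
{
        struct my_cfg *cfg;
        int t = 0;

        rcu_read_lock_sched();                  /* disables preemption */
        cfg = rcu_dereference_sched_check(cur_cfg, lockdep_is_held(&cfg_mutex));
        if (cfg)
                t = cfg->threshold;
        rcu_read_unlock_sched();
        return t;
}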
@@ -620,6 +644,12 @@ do { \
620644 * sections, invocation of the corresponding RCU callback is deferred
621645 * until after all the other CPUs exit their critical sections.
622646 *
647+ * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
648+ * wait for regions of code with preemption disabled, including regions of
649+ * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which
650+ * define synchronize_sched(), only code enclosed within rcu_read_lock()
651+ * and rcu_read_unlock() is guaranteed to be waited for.
652+ *
623653 * Note, however, that RCU callbacks are permitted to run concurrently
624654 * with new RCU read-side critical sections. One way that this can happen
625655 * is via the following sequence of events: (1) CPU 0 enters an RCU
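A short updater sketch of the guarantee described above (hypothetical list and structure names, not from the patch): once call_rcu() is invoked, the callback runs only after all pre-existing readers finish, which on v5.0 and later includes preempt-, irq-, and softirq-disabled regions as well as rcu_read_lock() sections:

#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_node {
        struct list_head list;
        struct rcu_head rcu;
        int key;
};

static void my_node_free(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct my_node, rcu));
}

static void remove_my_node(struct my_node *node)
{
        list_del_rcu(&node->list);              /* unpublish from the RCU-protected list */
        call_rcu(&node->rcu, my_node_free);     /* free only after a grace period */
}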
@@ -672,33 +702,12 @@ static __always_inline void rcu_read_lock(void)
672702/**
673703 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
674704 *
675- * In most situations, rcu_read_unlock() is immune from deadlock.
676- * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
677- * is responsible for deboosting, which it does via rt_mutex_unlock().
678- * Unfortunately, this function acquires the scheduler's runqueue and
679- * priority-inheritance spinlocks. This means that deadlock could result
680- * if the caller of rcu_read_unlock() already holds one of these locks or
681- * any lock that is ever acquired while holding them.
682- *
683- * That said, RCU readers are never priority boosted unless they were
684- * preempted. Therefore, one way to avoid deadlock is to make sure
685- * that preemption never happens within any RCU read-side critical
686- * section whose outermost rcu_read_unlock() is called with one of
687- * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
688- * a number of ways, for example, by invoking preempt_disable() before
689- * critical section's outermost rcu_read_lock().
690- *
691- * Given that the set of locks acquired by rt_mutex_unlock() might change
692- * at any time, a somewhat more future-proofed approach is to make sure
693- * that that preemption never happens within any RCU read-side critical
694- * section whose outermost rcu_read_unlock() is called with irqs disabled.
695- * This approach relies on the fact that rt_mutex_unlock() currently only
696- * acquires irq-disabled locks.
697- *
698- * The second of these two approaches is best in most situations,
699- * however, the first approach can also be useful, at least to those
700- * developers willing to keep abreast of the set of locks acquired by
701- * rt_mutex_unlock().
705+ * In almost all situations, rcu_read_unlock() is immune from deadlock.
706+ * In recent kernels that have consolidated synchronize_sched() and
707+ * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
708+ * also extends to the scheduler's runqueue and priority-inheritance
709+ * spinlocks, courtesy of the quiescent-state deferral that is carried
710+ * out when rcu_read_unlock() is invoked with interrupts disabled.
702711 *
703712 * See rcu_read_lock() for more information.
704713 */
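A sketch of the situation the new wording covers (hypothetical function, not from the patch): the outermost rcu_read_unlock() runs with interrupts disabled, and with the consolidated RCU flavors any required deboosting or quiescent-state reporting is deferred rather than acquiring scheduler locks on the spot:

#include <linux/irqflags.h>
#include <linux/rcupdate.h>

static void reader_ending_with_irqs_off(void)
{
        unsigned long flags;

        rcu_read_lock();
        /* ... access RCU-protected data ... */
        local_irq_save(flags);
        rcu_read_unlock();      /* safe: quiescent-state work is deferred */
        local_irq_restore(flags);
}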
@@ -714,9 +723,11 @@ static inline void rcu_read_unlock(void)
714723/**
715724 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
716725 *
717- * This is equivalent of rcu_read_lock(), but also disables softirqs.
718- * Note that anything else that disables softirqs can also serve as
719- * an RCU read-side critical section.
726+ * This is equivalent to rcu_read_lock(), but also disables softirqs.
727+ * Note that anything else that disables softirqs can also serve as an RCU
728+ * read-side critical section. However, please note that this equivalence
729+ * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
730+ * rcu_read_lock_bh() were unrelated.
720731 *
721732 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
722733 * must occur in the same context, for example, it is illegal to invoke
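A sketch of the "anything else that disables softirqs" case (hypothetical names, not from the patch): on v5.0 and later, the local_bh_disable() region below is itself an RCU read-side critical section, so an update-side synchronize_rcu() waits for it:

#include <linux/bottom_half.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct my_stats {
        u64 packets;
};

static struct my_stats __rcu *stats_p;

static u64 read_packets(void)
{
        struct my_stats *s;
        u64 n = 0;

        local_bh_disable();                     /* serves as the read-side marker */
        s = rcu_dereference_bh(stats_p);
        if (s)
                n = s->packets;
        local_bh_enable();
        return n;
}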
@@ -749,9 +760,12 @@ static inline void rcu_read_unlock_bh(void)
749760/**
750761 * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
751762 *
752- * This is equivalent of rcu_read_lock(), but disables preemption.
753- * Read-side critical sections can also be introduced by anything else
754- * that disables preemption, including local_irq_disable() and friends.
763+ * This is equivalent to rcu_read_lock(), but also disables preemption.
764+ * Read-side critical sections can also be introduced by anything else that
765+ * disables preemption, including local_irq_disable() and friends. However,
766+ * please note that the equivalence to rcu_read_lock() applies only to
767+ * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
768+ * were unrelated.
755769 *
756770 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
757771 * must occur in the same context, for example, it is illegal to invoke
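Finally, a sketch of the corresponding RCU-sched point (hypothetical names): a bare preempt_disable() region acts as an RCU-sched read-side critical section, and on v5.0 and later it too is waited on by plain synchronize_rcu():

#include <linux/preempt.h>
#include <linux/rcupdate.h>

struct my_dev_cfg {
        int mtu;
};

static struct my_dev_cfg __rcu *dev_cfg;

static int current_mtu(void)
{
        struct my_dev_cfg *cfg;
        int mtu = 0;

        preempt_disable();                      /* this alone marks an RCU-sched reader */
        cfg = rcu_dereference_sched(dev_cfg);
        if (cfg)
                mtu = cfg->mtu;
        preempt_enable();
        return mtu;
}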