From e4fd1031ccb8f60c0b27cfc1cdb277584049d106 Mon Sep 17 00:00:00 2001 From: Amery Hung Date: Mon, 17 Nov 2025 10:56:04 -0800 Subject: [PATCH] bpf: Annotate rqspinlock lock acquiring functions with __must_check Locking a resilient queued spinlock can fail when deadlock or timeout happens. Mark the lock acquiring functions with __must_check to make sure callers always handle the returned error. Suggested-by: Andrii Nakryiko Signed-off-by: Amery Hung --- include/asm-generic/rqspinlock.h | 47 +++++++++++++++++++------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h index 6d4244d643df3..855c094355060 100644 --- a/include/asm-generic/rqspinlock.h +++ b/include/asm-generic/rqspinlock.h @@ -171,7 +171,7 @@ static __always_inline void release_held_lock_entry(void) * * -EDEADLK - Lock acquisition failed because of AA/ABBA deadlock. * * -ETIMEDOUT - Lock acquisition failed because of timeout. */ -static __always_inline int res_spin_lock(rqspinlock_t *lock) +static __always_inline __must_check int res_spin_lock(rqspinlock_t *lock) { int val = 0; @@ -223,27 +223,36 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock) #define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; }) #endif -#define raw_res_spin_lock(lock) \ - ({ \ - int __ret; \ - preempt_disable(); \ - __ret = res_spin_lock(lock); \ - if (__ret) \ - preempt_enable(); \ - __ret; \ - }) +static __always_inline __must_check int raw_res_spin_lock(rqspinlock_t *lock) +{ + int ret; + + preempt_disable(); + ret = res_spin_lock(lock); + if (ret) + preempt_enable(); + + return ret; +} #define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); }) -#define raw_res_spin_lock_irqsave(lock, flags) \ - ({ \ - int __ret; \ - local_irq_save(flags); \ - __ret = raw_res_spin_lock(lock); \ - if (__ret) \ - local_irq_restore(flags); \ - __ret; \ - }) +static __always_inline __must_check int 
+__raw_res_spin_lock_irqsave(rqspinlock_t *lock, unsigned long *flags) +{ + unsigned long __flags; + int ret; + + local_irq_save(__flags); + ret = raw_res_spin_lock(lock); + if (ret) + local_irq_restore(__flags); + + *flags = __flags; + return ret; +} + +#define raw_res_spin_lock_irqsave(lock, flags) __raw_res_spin_lock_irqsave(lock, &flags) #define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })