
Commit 8b01af8

Paul Gortmaker authored and rostedt committed
list_bl: Make list head locking RT safe
As per changes in include/linux/jbd_common.h for avoiding the bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal head lock rt safe") we do the same thing here.

We use the non-atomic __set_bit and __clear_bit inside the scope of the lock to preserve the ability of the existing LIST_DEBUG code to use the zeroth bit in the sanity checks.

As a bit spinlock, we had no lockdep visibility into the usage of the list head locking. Now, if we were to implement it as a standard non-raw spinlock, we would see:

BUG: sleeping function called from invalid context at kernel/rtmutex.c:658
in_atomic(): 1, irqs_disabled(): 0, pid: 122, name: udevd
5 locks held by udevd/122:
 #0:  (&sb->s_type->i_mutex_key#7/1){+.+.+.}, at: [<ffffffff811967e8>] lock_rename+0xe8/0xf0
 #1:  (rename_lock){+.+...}, at: [<ffffffff811a277c>] d_move+0x2c/0x60
 #2:  (&dentry->d_lock){+.+...}, at: [<ffffffff811a0763>] dentry_lock_for_move+0xf3/0x130
 #3:  (&dentry->d_lock/2){+.+...}, at: [<ffffffff811a0734>] dentry_lock_for_move+0xc4/0x130
 #4:  (&dentry->d_lock/3){+.+...}, at: [<ffffffff811a0747>] dentry_lock_for_move+0xd7/0x130
Pid: 122, comm: udevd Not tainted 3.4.47-rt62 #7
Call Trace:
 [<ffffffff810b9624>] __might_sleep+0x134/0x1f0
 [<ffffffff817a24d4>] rt_spin_lock+0x24/0x60
 [<ffffffff811a0c4c>] __d_shrink+0x5c/0xa0
 [<ffffffff811a1b2d>] __d_drop+0x1d/0x40
 [<ffffffff811a24be>] __d_move+0x8e/0x320
 [<ffffffff811a278e>] d_move+0x3e/0x60
 [<ffffffff81199598>] vfs_rename+0x198/0x4c0
 [<ffffffff8119b093>] sys_renameat+0x213/0x240
 [<ffffffff817a2de5>] ? _raw_spin_unlock+0x35/0x60
 [<ffffffff8107781c>] ? do_page_fault+0x1ec/0x4b0
 [<ffffffff817a32ca>] ? retint_swapgs+0xe/0x13
 [<ffffffff813eb0e6>] ? trace_hardirqs_on_thunk+0x3a/0x3f
 [<ffffffff8119b0db>] sys_rename+0x1b/0x20
 [<ffffffff817a3b96>] system_call_fastpath+0x1a/0x1f

Since we are only taking the lock during short-lived list operations, let's assume for now that it being raw won't be a significant latency concern.

Signed-off-by: Paul Gortmaker <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
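For context on why the lock bit still has to be toggled under the raw spinlock: the other hlist_bl helpers encode the lock in bit 0 of the head's first pointer and assert on it when list debugging is enabled. A minimal sketch of those helpers, paraphrased from mainline include/linux/list_bl.h (approximate, not the exact text of any one kernel version):

/* Sketch of the mainline list_bl helpers that depend on bit 0.
 * Paraphrased from include/linux/list_bl.h; approximate.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define LIST_BL_LOCKMASK	1UL	/* bit 0 of ->first doubles as the lock */
#else
#define LIST_BL_LOCKMASK	0UL
#endif

#ifdef CONFIG_DEBUG_LIST
#define LIST_BL_BUG_ON(x)	BUG_ON(x)
#else
#define LIST_BL_BUG_ON(x)
#endif

/* Readers mask the lock bit out of the head pointer... */
static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)h->first & ~LIST_BL_LOCKMASK);
}

/* ...and writers assert that the bit is held while they update the head.
 * This is why hlist_bl_lock() must still __set_bit(0, ...) on RT even
 * though the actual serialization comes from the raw spinlock.
 */
static inline void hlist_bl_set_first(struct hlist_bl_head *h,
				      struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	h->first = (struct hlist_bl_node *)
		((unsigned long)n | LIST_BL_LOCKMASK);
}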
1 parent 8749dbe commit 8b01af8

File tree

1 file changed: +26 −2 lines changed

include/linux/list_bl.h

Lines changed: 26 additions & 2 deletions
@@ -3,6 +3,7 @@
 #define _LINUX_LIST_BL_H
 
 #include <linux/list.h>
+#include <linux/spinlock.h>
 #include <linux/bit_spinlock.h>
 
 /*
@@ -33,13 +34,22 @@
 
 struct hlist_bl_head {
 	struct hlist_bl_node *first;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	raw_spinlock_t lock;
+#endif
 };
 
 struct hlist_bl_node {
 	struct hlist_bl_node *next, **pprev;
 };
-#define INIT_HLIST_BL_HEAD(ptr) \
-	((ptr)->first = NULL)
+
+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
+{
+	h->first = NULL;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	raw_spin_lock_init(&h->lock);
+#endif
+}
 
 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
 {
@@ -119,12 +129,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
 
 static inline void hlist_bl_lock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(0, (unsigned long *)b);
+#else
+	raw_spin_lock(&b->lock);
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__set_bit(0, (unsigned long *)b);
+#endif
+#endif
 }
 
 static inline void hlist_bl_unlock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	__bit_spin_unlock(0, (unsigned long *)b);
+#else
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__clear_bit(0, (unsigned long *)b);
+#endif
+	raw_spin_unlock(&b->lock);
+#endif
 }
 
 static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
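For orientation, here is a sketch of how a typical caller uses the lock helpers. The function name example_remove() is hypothetical, but it mirrors the pattern of __d_drop() in fs/dcache.c, and __hlist_bl_del(), hlist_bl_lock() and hlist_bl_unlock() are the real list_bl API. Callers are unchanged by this patch: with PREEMPT_RT_BASE the lock/unlock pair resolves to the raw spinlock, otherwise to the bit spinlock on bit 0 of first.

/* Hypothetical caller, for illustration only: remove a node from a
 * bl-list under the head lock.  The head lock also guards the sanity
 * checks in the LIST_DEBUG code, in both lock implementations.
 */
static void example_remove(struct hlist_bl_head *head,
			   struct hlist_bl_node *n)
{
	hlist_bl_lock(head);
	__hlist_bl_del(n);
	n->pprev = NULL;	/* mark the node unhashed */
	hlist_bl_unlock(head);
}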
