Skip to content

Commit 50db04d

Browse files
vegard authored and KAGA-KOKO committed
debugobjects: fix lockdep warning
Daniel J Blueman reported:

| =======================================================
| [ INFO: possible circular locking dependency detected ]
| 2.6.26-rc5-201c #1
| -------------------------------------------------------
| nscd/3669 is trying to acquire lock:
|  (&n->list_lock){.+..}, at: [<ffffffff802bab03>] deactivate_slab+0x173/0x1e0
|
| but task is already holding lock:
|  (&obj_hash[i].lock){++..}, at: [<ffffffff803fa56f>] __debug_object_init+0x2f/0x350
|
| which lock already depends on the new lock.

There are two locks involved here; the first is a SLUB-local lock, and
the second is a debugobjects-local lock. They are basically taken in two
different orders:

1. SLUB { debugobjects { ... } }
2. debugobjects { SLUB { ... } }

This patch changes pattern #2 by trying to fill the memory pool (e.g.
the call into SLUB/kmalloc()) outside the debugobjects lock, so now the
two patterns look like this:

1. SLUB { debugobjects { ... } }
2. SLUB { } debugobjects { ... }

[ [email protected]: pool_lock needs to be taken irq safe in fill_pool ]

Reported-by: Daniel J Blueman <[email protected]>
Signed-off-by: Vegard Nossum <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
1 parent 952f4a0 commit 50db04d

File tree

1 file changed

+6
-9
lines changed

1 file changed

+6
-9
lines changed

lib/debugobjects.c

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,7 @@ static int fill_pool(void)
6868
{
6969
gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
7070
struct debug_obj *new;
71+
unsigned long flags;
7172

7273
if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
7374
return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
8182
if (!new)
8283
return obj_pool_free;
8384

84-
spin_lock(&pool_lock);
85+
spin_lock_irqsave(&pool_lock, flags);
8586
hlist_add_head(&new->node, &obj_pool);
8687
obj_pool_free++;
87-
spin_unlock(&pool_lock);
88+
spin_unlock_irqrestore(&pool_lock, flags);
8889
}
8990
return obj_pool_free;
9091
}
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
110111
}
111112

112113
/*
113-
* Allocate a new object. If the pool is empty and no refill possible,
114-
* switch off the debugger.
114+
* Allocate a new object. If the pool is empty, switch off the debugger.
115115
*/
116116
static struct debug_obj *
117117
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
118118
{
119119
struct debug_obj *obj = NULL;
120-
int retry = 0;
121120

122-
repeat:
123121
spin_lock(&pool_lock);
124122
if (obj_pool.first) {
125123
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
141139
}
142140
spin_unlock(&pool_lock);
143141

144-
if (fill_pool() && !obj && !retry++)
145-
goto repeat;
146-
147142
return obj;
148143
}
149144

@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
261256
struct debug_obj *obj;
262257
unsigned long flags;
263258

259+
fill_pool();
260+
264261
db = get_bucket((unsigned long) addr);
265262

266263
spin_lock_irqsave(&db->lock, flags);

0 commit comments

Comments (0)